diff --git a/.gitignore b/.gitignore
index ae3df758..f5a3ec30 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,4 +3,8 @@
.vscode/
# Binary artifacts
-bin/
\ No newline at end of file
+bin/
+main_test.go
+vendor
+*.txt
+old
diff --git a/Makefile b/Makefile
index 4f3230b9..1f2c88b7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
UID_GID ?= $(shell id -u):$(shell id -g)
-GO_VERSION ?= 1.14.4
+GO_VERSION ?= 1.15.6
GIT_VERSION := $(shell hack/ldflags.sh --version-only)
PROJECT := github.com/weaveworks/libgitops
BOUNDING_API_DIRS := ${PROJECT}/cmd/apis/sample
@@ -7,7 +7,6 @@ API_DIRS := ${PROJECT}/cmd/sample-app/apis/sample,${PROJECT}/cmd/sample-app/apis
SRC_PKGS := cmd pkg
DOCKER_ARGS := --rm
CACHE_DIR := $(shell pwd)/bin/cache
-API_DOCS := api/sample-app.md api/runtime.md
BINARIES := bin/sample-app bin/sample-gitops bin/sample-watch
# If we're not running in CI, run Docker interactively
@@ -39,7 +38,6 @@ test-internal:
tidy: docker-tidy-internal
tidy-internal: /go/bin/goimports
go mod tidy
- hack/generate-client.sh
gofmt -s -w ${SRC_PKGS}
goimports -w ${SRC_PKGS}
diff --git a/cmd/common/common.go b/cmd/common/common.go
index dcba7c68..f011dace 100644
--- a/cmd/common/common.go
+++ b/cmd/common/common.go
@@ -13,8 +13,8 @@ import (
"github.com/spf13/pflag"
"github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1"
"github.com/weaveworks/libgitops/cmd/sample-app/version"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/storage"
+ "github.com/weaveworks/libgitops/pkg/storage/client"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
)
var (
@@ -25,10 +25,6 @@ func init() {
rand.Seed(time.Now().UnixNano())
}
-func CarKeyForName(name string) storage.ObjectKey {
- return storage.NewObjectKey(storage.NewKindKey(CarGVK), runtime.NewIdentifier("default/"+name))
-}
-
func NewCar(name string) *v1alpha1.Car {
obj := &v1alpha1.Car{}
obj.Name = name
@@ -38,17 +34,17 @@ func NewCar(name string) *v1alpha1.Car {
return obj
}
-func SetNewCarStatus(s storage.Storage, key storage.ObjectKey) error {
- obj, err := s.Get(key)
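+// SetNewCarStatus fetches the Car with the given name through the client,
+// assigns it a randomized status, and persists the update.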
+func SetNewCarStatus(ctx context.Context, c client.Client, name string) error {
+ car := &v1alpha1.Car{}
+ err := c.Get(ctx, core.ObjectKey{Name: name}, car)
if err != nil {
return err
}
- car := obj.(*v1alpha1.Car)
car.Status.Distance = rand.Uint64()
car.Status.Speed = rand.Float64() * 100
- return s.Update(car)
+ return c.Update(ctx, car)
}
func ParseVersionFlag() {
@@ -75,8 +71,8 @@ func NewEcho() *echo.Echo {
func StartEcho(e *echo.Echo) error {
// Start the server
go func() {
- if err := e.Start(":8888"); err != nil {
- e.Logger.Info("shutting down the server")
+ if err := e.Start(":8881"); err != nil {
+ e.Logger.Info("shutting down the server", err)
}
}()
diff --git a/pkg/logs/flag/flag.go b/cmd/common/logs/flag/flag.go
similarity index 84%
rename from pkg/logs/flag/flag.go
rename to cmd/common/logs/flag/flag.go
index 3c226cfe..83f59678 100644
--- a/pkg/logs/flag/flag.go
+++ b/cmd/common/logs/flag/flag.go
@@ -5,6 +5,9 @@ import (
"github.com/spf13/pflag"
)
+// TODO: Use these flags in the sample binaries?
+// TODO: Move to the way controller-runtime does logs instead?
+
type LogLevelFlag struct {
value *logrus.Level
}
diff --git a/pkg/logs/logs.go b/cmd/common/logs/logs.go
similarity index 95%
rename from pkg/logs/logs.go
rename to cmd/common/logs/logs.go
index 1ca78f1b..c5b11a85 100644
--- a/pkg/logs/logs.go
+++ b/cmd/common/logs/logs.go
@@ -8,6 +8,8 @@ import (
log "github.com/sirupsen/logrus"
)
+// TODO: Move to the way controller-runtime does logs instead?
+
// Quiet specifies whether to only print machine-readable IDs
var Quiet bool
diff --git a/cmd/sample-app/client/client.go b/cmd/sample-app/client/client.go
deleted file mode 100644
index e4d98247..00000000
--- a/cmd/sample-app/client/client.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// TODO: Docs
-
-// +build ignore
-
-package client
-
-import (
- api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample"
- "github.com/weaveworks/libgitops/pkg/client"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/storage"
-
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// TODO: Autogenerate this!
-
-// NewClient creates a client for the specified storage
-func NewClient(s storage.Storage) *Client {
- return &Client{
- SampleInternalClient: NewSampleInternalClient(s),
- }
-}
-
-// Client is a struct providing high-level access to objects in a storage
-// The resource-specific client interfaces are automatically generated based
-// off client_resource_template.go. The auto-generation can be done with hack/client.sh
-// At the moment SampleInternalClient is the default client. If more than this client
-// is created in the future, the SampleInternalClient will be accessible under
-// Client.SampleInternal() instead.
-type Client struct {
- *SampleInternalClient
-}
-
-func NewSampleInternalClient(s storage.Storage) *SampleInternalClient {
- return &SampleInternalClient{
- storage: s,
- dynamicClients: map[schema.GroupVersionKind]client.DynamicClient{},
- gv: api.SchemeGroupVersion,
- }
-}
-
-type SampleInternalClient struct {
- storage storage.Storage
- gv schema.GroupVersion
- carClient CarClient
- motorcycleClient MotorcycleClient
- dynamicClients map[schema.GroupVersionKind]client.DynamicClient
-}
-
-// Dynamic returns the DynamicClient for the Client instance, for the specific kind
-func (c *SampleInternalClient) Dynamic(kind runtime.Kind) (dc client.DynamicClient) {
- var ok bool
- gvk := c.gv.WithKind(kind.Title())
- if dc, ok = c.dynamicClients[gvk]; !ok {
- dc = client.NewDynamicClient(c.storage, gvk)
- c.dynamicClients[gvk] = dc
- }
-
- return
-}
diff --git a/cmd/sample-app/client/zz_generated.client_car.go b/cmd/sample-app/client/zz_generated.client_car.go
deleted file mode 100644
index 2661d453..00000000
--- a/cmd/sample-app/client/zz_generated.client_car.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// +build ignore
-
-/*
- Note: This file is autogenerated! Do not edit it manually!
- Edit client_car_template.go instead, and run
- hack/generate-client.sh afterwards.
-*/
-
-package client
-
-import (
- "fmt"
-
- api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample"
-
- log "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/storage"
- "github.com/weaveworks/libgitops/pkg/storage/filterer"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// CarClient is an interface for accessing Car-specific API objects
-type CarClient interface {
- // New returns a new Car
- New() *api.Car
- // Get returns the Car matching given UID from the storage
- Get(runtime.UID) (*api.Car, error)
- // Set saves the given Car into persistent storage
- Set(*api.Car) error
- // Patch performs a strategic merge patch on the object with
- // the given UID, using the byte-encoded patch given
- Patch(runtime.UID, []byte) error
- // Find returns the Car matching the given filter, filters can
- // match e.g. the Object's Name, UID or a specific property
- Find(filter filterer.BaseFilter) (*api.Car, error)
- // FindAll returns multiple Cars matching the given filter, filters can
- // match e.g. the Object's Name, UID or a specific property
- FindAll(filter filterer.BaseFilter) ([]*api.Car, error)
- // Delete deletes the Car with the given UID from the storage
- Delete(uid runtime.UID) error
- // List returns a list of all Cars available
- List() ([]*api.Car, error)
-}
-
-// Cars returns the CarClient for the Client object
-func (c *SampleInternalClient) Cars() CarClient {
- if c.carClient == nil {
- c.carClient = newCarClient(c.storage, c.gv)
- }
-
- return c.carClient
-}
-
-// carClient is a struct implementing the CarClient interface
-// It uses a shared storage instance passed from the Client together with its own Filterer
-type carClient struct {
- storage storage.Storage
- filterer *filterer.Filterer
- gvk schema.GroupVersionKind
-}
-
-// newCarClient builds the carClient struct using the storage implementation and a new Filterer
-func newCarClient(s storage.Storage, gv schema.GroupVersion) CarClient {
- return &carClient{
- storage: s,
- filterer: filterer.NewFilterer(s),
- gvk: gv.WithKind(api.KindCar.Title()),
- }
-}
-
-// New returns a new Object of its kind
-func (c *carClient) New() *api.Car {
- log.Tracef("Client.New; GVK: %v", c.gvk)
- obj, err := c.storage.New(c.gvk)
- if err != nil {
- panic(fmt.Sprintf("Client.New must not return an error: %v", err))
- }
- return obj.(*api.Car)
-}
-
-// Find returns a single Car based on the given Filter
-func (c *carClient) Find(filter filterer.BaseFilter) (*api.Car, error) {
- log.Tracef("Client.Find; GVK: %v", c.gvk)
- object, err := c.filterer.Find(c.gvk, filter)
- if err != nil {
- return nil, err
- }
-
- return object.(*api.Car), nil
-}
-
-// FindAll returns multiple Cars based on the given Filter
-func (c *carClient) FindAll(filter filterer.BaseFilter) ([]*api.Car, error) {
- log.Tracef("Client.FindAll; GVK: %v", c.gvk)
- matches, err := c.filterer.FindAll(c.gvk, filter)
- if err != nil {
- return nil, err
- }
-
- results := make([]*api.Car, 0, len(matches))
- for _, item := range matches {
- results = append(results, item.(*api.Car))
- }
-
- return results, nil
-}
-
-// Get returns the Car matching given UID from the storage
-func (c *carClient) Get(uid runtime.UID) (*api.Car, error) {
- log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk)
- object, err := c.storage.Get(c.gvk, uid)
- if err != nil {
- return nil, err
- }
-
- return object.(*api.Car), nil
-}
-
-// Set saves the given Car into the persistent storage
-func (c *carClient) Set(car *api.Car) error {
- log.Tracef("Client.Set; UID: %q, GVK: %v", car.GetUID(), c.gvk)
- return c.storage.Set(c.gvk, car)
-}
-
-// Patch performs a strategic merge patch on the object with
-// the given UID, using the byte-encoded patch given
-func (c *carClient) Patch(uid runtime.UID, patch []byte) error {
- return c.storage.Patch(c.gvk, uid, patch)
-}
-
-// Delete deletes the Car from the storage
-func (c *carClient) Delete(uid runtime.UID) error {
- log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk)
- return c.storage.Delete(c.gvk, uid)
-}
-
-// List returns a list of all Cars available
-func (c *carClient) List() ([]*api.Car, error) {
- log.Tracef("Client.List; GVK: %v", c.gvk)
- list, err := c.storage.List(c.gvk)
- if err != nil {
- return nil, err
- }
-
- results := make([]*api.Car, 0, len(list))
- for _, item := range list {
- results = append(results, item.(*api.Car))
- }
-
- return results, nil
-}
diff --git a/cmd/sample-app/client/zz_generated.client_motorcycle.go b/cmd/sample-app/client/zz_generated.client_motorcycle.go
deleted file mode 100644
index 7256e003..00000000
--- a/cmd/sample-app/client/zz_generated.client_motorcycle.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// +build ignore
-
-/*
- Note: This file is autogenerated! Do not edit it manually!
- Edit client_motorcycle_template.go instead, and run
- hack/generate-client.sh afterwards.
-*/
-
-package client
-
-import (
- "fmt"
-
- api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample"
-
- log "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/storage"
- "github.com/weaveworks/libgitops/pkg/storage/filterer"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// MotorcycleClient is an interface for accessing Motorcycle-specific API objects
-type MotorcycleClient interface {
- // New returns a new Motorcycle
- New() *api.Motorcycle
- // Get returns the Motorcycle matching given UID from the storage
- Get(runtime.UID) (*api.Motorcycle, error)
- // Set saves the given Motorcycle into persistent storage
- Set(*api.Motorcycle) error
- // Patch performs a strategic merge patch on the object with
- // the given UID, using the byte-encoded patch given
- Patch(runtime.UID, []byte) error
- // Find returns the Motorcycle matching the given filter, filters can
- // match e.g. the Object's Name, UID or a specific property
- Find(filter filterer.BaseFilter) (*api.Motorcycle, error)
- // FindAll returns multiple Motorcycles matching the given filter, filters can
- // match e.g. the Object's Name, UID or a specific property
- FindAll(filter filterer.BaseFilter) ([]*api.Motorcycle, error)
- // Delete deletes the Motorcycle with the given UID from the storage
- Delete(uid runtime.UID) error
- // List returns a list of all Motorcycles available
- List() ([]*api.Motorcycle, error)
-}
-
-// Motorcycles returns the MotorcycleClient for the Client object
-func (c *SampleInternalClient) Motorcycles() MotorcycleClient {
- if c.motorcycleClient == nil {
- c.motorcycleClient = newMotorcycleClient(c.storage, c.gv)
- }
-
- return c.motorcycleClient
-}
-
-// motorcycleClient is a struct implementing the MotorcycleClient interface
-// It uses a shared storage instance passed from the Client together with its own Filterer
-type motorcycleClient struct {
- storage storage.Storage
- filterer *filterer.Filterer
- gvk schema.GroupVersionKind
-}
-
-// newMotorcycleClient builds the motorcycleClient struct using the storage implementation and a new Filterer
-func newMotorcycleClient(s storage.Storage, gv schema.GroupVersion) MotorcycleClient {
- return &motorcycleClient{
- storage: s,
- filterer: filterer.NewFilterer(s),
- gvk: gv.WithKind(api.KindMotorcycle.Title()),
- }
-}
-
-// New returns a new Object of its kind
-func (c *motorcycleClient) New() *api.Motorcycle {
- log.Tracef("Client.New; GVK: %v", c.gvk)
- obj, err := c.storage.New(c.gvk)
- if err != nil {
- panic(fmt.Sprintf("Client.New must not return an error: %v", err))
- }
- return obj.(*api.Motorcycle)
-}
-
-// Find returns a single Motorcycle based on the given Filter
-func (c *motorcycleClient) Find(filter filterer.BaseFilter) (*api.Motorcycle, error) {
- log.Tracef("Client.Find; GVK: %v", c.gvk)
- object, err := c.filterer.Find(c.gvk, filter)
- if err != nil {
- return nil, err
- }
-
- return object.(*api.Motorcycle), nil
-}
-
-// FindAll returns multiple Motorcycles based on the given Filter
-func (c *motorcycleClient) FindAll(filter filterer.BaseFilter) ([]*api.Motorcycle, error) {
- log.Tracef("Client.FindAll; GVK: %v", c.gvk)
- matches, err := c.filterer.FindAll(c.gvk, filter)
- if err != nil {
- return nil, err
- }
-
- results := make([]*api.Motorcycle, 0, len(matches))
- for _, item := range matches {
- results = append(results, item.(*api.Motorcycle))
- }
-
- return results, nil
-}
-
-// Get returns the Motorcycle matching given UID from the storage
-func (c *motorcycleClient) Get(uid runtime.UID) (*api.Motorcycle, error) {
- log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk)
- object, err := c.storage.Get(c.gvk, uid)
- if err != nil {
- return nil, err
- }
-
- return object.(*api.Motorcycle), nil
-}
-
-// Set saves the given Motorcycle into the persistent storage
-func (c *motorcycleClient) Set(motorcycle *api.Motorcycle) error {
- log.Tracef("Client.Set; UID: %q, GVK: %v", motorcycle.GetUID(), c.gvk)
- return c.storage.Set(c.gvk, motorcycle)
-}
-
-// Patch performs a strategic merge patch on the object with
-// the given UID, using the byte-encoded patch given
-func (c *motorcycleClient) Patch(uid runtime.UID, patch []byte) error {
- return c.storage.Patch(c.gvk, uid, patch)
-}
-
-// Delete deletes the Motorcycle from the storage
-func (c *motorcycleClient) Delete(uid runtime.UID) error {
- log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk)
- return c.storage.Delete(c.gvk, uid)
-}
-
-// List returns a list of all Motorcycles available
-func (c *motorcycleClient) List() ([]*api.Motorcycle, error) {
- log.Tracef("Client.List; GVK: %v", c.gvk)
- list, err := c.storage.List(c.gvk)
- if err != nil {
- return nil, err
- }
-
- results := make([]*api.Motorcycle, 0, len(list))
- for _, item := range list {
- results = append(results, item.(*api.Motorcycle))
- }
-
- return results, nil
-}
diff --git a/cmd/sample-app/main.go b/cmd/sample-app/main.go
index ea119a95..957e9ca1 100644
--- a/cmd/sample-app/main.go
+++ b/cmd/sample-app/main.go
@@ -2,7 +2,10 @@ package main
import (
"bytes"
+ "context"
+ "encoding/json"
"fmt"
+ "io/ioutil"
"net/http"
"os"
@@ -10,12 +13,20 @@ import (
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/weaveworks/libgitops/cmd/common"
+ "github.com/weaveworks/libgitops/cmd/common/logs"
"github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme"
"github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1"
- "github.com/weaveworks/libgitops/pkg/logs"
- "github.com/weaveworks/libgitops/pkg/runtime"
"github.com/weaveworks/libgitops/pkg/serializer"
"github.com/weaveworks/libgitops/pkg/storage"
+ "github.com/weaveworks/libgitops/pkg/storage/backend"
+ "github.com/weaveworks/libgitops/pkg/storage/client"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
+ "github.com/weaveworks/libgitops/pkg/storage/kube"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/types"
+ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
)
var manifestDirFlag = pflag.String("data-dir", "/tmp/libgitops/manifest", "Where to store the YAML files")
@@ -25,27 +36,50 @@ func main() {
common.ParseVersionFlag()
// Run the application
- if err := run(); err != nil {
+ if err := run(*manifestDirFlag); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
-func run() error {
+func run(manifestDir string) error {
+ ctx := context.Background()
// Create the manifest directory
- if err := os.MkdirAll(*manifestDirFlag, 0755); err != nil {
+ if err := os.MkdirAll(manifestDir, 0755); err != nil {
return err
}
// Set the log level
logs.Logger.SetLevel(logrus.InfoLevel)
- plainStorage := storage.NewGenericStorage(
- storage.NewGenericRawStorage(*manifestDirFlag, v1alpha1.SchemeGroupVersion, serializer.ContentTypeYAML),
- scheme.Serializer,
- []runtime.IdentifierFactory{runtime.Metav1NameIdentifier},
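+ // Set up a plain filesystem-backed storage: one YAML file per object
+ // directly under manifestDir, without per-group subdirectories.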
+ s, err := filesystem.NewSimpleStorage(
+ manifestDir,
+ storage.StaticNamespacer{NamespacedIsDefaultPolicy: false},
+ filesystem.SimpleFileFinderOptions{
+ DisableGroupDirectory: true,
+ ContentType: serializer.ContentTypeYAML,
+ },
)
- defer func() { _ = plainStorage.Close() }()
+ if err != nil {
+ return err
+ }
+
+ // Just use default encoders and decoders
+ encoder := scheme.Serializer.Encoder()
+ decoder := scheme.Serializer.Decoder()
+
+ // Use the version information in the scheme to determine the storage version
+ versioner := backend.SchemePreferredVersioner{Scheme: scheme.Scheme}
+
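+ // Combine the storage, codecs, namespace enforcement and versioning
+ // policy into a generic backend.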
+ b, err := backend.NewGeneric(s, encoder, decoder, kube.NewNamespaceEnforcer(), versioner, nil)
+ if err != nil {
+ return err
+ }
+
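+ // Wrap the backend in a generic client exposing CRUD operations.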
+ plainClient, err := client.NewGeneric(b)
+ if err != nil {
+ return err
+ }
e := common.NewEcho()
@@ -55,7 +89,43 @@ func run() error {
return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
}
- obj, err := plainStorage.Get(common.CarKeyForName(name))
+ obj := &v1alpha1.Car{}
+ err := plainClient.Get(ctx, core.ObjectKey{Name: name}, obj)
+ if err != nil {
+ return err
+ }
+ var content bytes.Buffer
+ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), obj); err != nil {
+ return err
+ }
+ return c.JSONBlob(http.StatusOK, content.Bytes())
+ })
+
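+ // Serve only object metadata for all Cars, by listing into a
+ // metav1.PartialObjectMetadataList.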
+ e.GET("/meta/", func(c echo.Context) error {
+ list := &metav1.PartialObjectMetadataList{}
+ list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList"))
+ err := plainClient.List(ctx, list)
+ if err != nil {
+ return err
+ }
+ var content bytes.Buffer
+ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil {
+ return err
+ }
+ return c.JSONBlob(http.StatusOK, content.Bytes())
+ })
+
+ e.GET("/meta/:name", func(c echo.Context) error {
+ name := c.Param("name")
+ if len(name) == 0 {
+ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
+ }
+
+ obj := &metav1.PartialObjectMetadata{}
+ obj.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("Car"))
+ err := plainClient.Get(ctx, core.ObjectKey{
+ Name: name,
+ }, obj)
if err != nil {
return err
}
@@ -66,13 +136,57 @@ func run() error {
return c.JSONBlob(http.StatusOK, content.Bytes())
})
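+ // Serve all Cars in their schemaless (unstructured) representation.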
+ e.GET("/unstructured/", func(c echo.Context) error {
+ list := &unstructured.UnstructuredList{}
+ list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList"))
+ err := plainClient.List(ctx, list)
+ if err != nil {
+ return err
+ }
+ var content bytes.Buffer
+ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil {
+ return err
+ }
+ var newcontent bytes.Buffer
+ if err := json.Indent(&newcontent, content.Bytes(), "", " "); err != nil {
+ return err
+ }
+ return c.JSONBlob(http.StatusOK, newcontent.Bytes())
+ })
+
+ e.GET("/unstructured/:name", func(c echo.Context) error {
+ name := c.Param("name")
+ if len(name) == 0 {
+ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
+ }
+
+ obj := &unstructured.Unstructured{}
+ obj.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("Car"))
+ err := plainClient.Get(ctx, core.ObjectKey{
+ Name: name,
+ }, obj)
+ if err != nil {
+ return err
+ }
+ var content bytes.Buffer
+ // For some reason this does not pretty-encode the output, so indent it manually below
+ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), obj); err != nil {
+ return err
+ }
+ var newcontent bytes.Buffer
+ if err := json.Indent(&newcontent, content.Bytes(), "", " "); err != nil {
+ return err
+ }
+ return c.JSONBlob(http.StatusOK, newcontent.Bytes())
+ })
+
e.POST("/plain/:name", func(c echo.Context) error {
name := c.Param("name")
if len(name) == 0 {
return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
}
- if err := plainStorage.Create(common.NewCar(name)); err != nil {
+ if err := plainClient.Create(ctx, common.NewCar(name)); err != nil {
return err
}
return c.String(200, "OK!")
@@ -84,11 +198,45 @@ func run() error {
return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
}
- if err := common.SetNewCarStatus(plainStorage, common.CarKeyForName(name)); err != nil {
+ if err := common.SetNewCarStatus(ctx, plainClient, name); err != nil {
return err
}
return c.String(200, "OK!")
})
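+ // Apply a JSON merge patch from the request body to the named Car.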
+ e.PATCH("/plain/:name", func(c echo.Context) error {
+ name := c.Param("name")
+ if len(name) == 0 {
+ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
+ }
+
+ body, err := ioutil.ReadAll(c.Request().Body)
+ if err != nil {
+ return err
+ }
+ c.Request().Body.Close()
+
+ car := &v1alpha1.Car{}
+ err = plainClient.Get(ctx, core.ObjectKey{
+ Name: name,
+ }, car)
+ if err != nil {
+ return err
+ }
+
+ if err := plainClient.Patch(ctx, car, ctrlclient.RawPatch(types.MergePatchType, body)); err != nil {
+ return err
+ }
+
+ return c.JSON(200, car)
+ })
+
return common.StartEcho(e)
}
+
+/*
+type noNamespacesRESTMapper struct{}
+
+func (noNamespacesRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
+ return &meta.RESTMapping{Scope: meta.RESTScopeRoot}, nil
+}*/
diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go
index e8c21805..2cee498c 100644
--- a/cmd/sample-gitops/main.go
+++ b/cmd/sample-gitops/main.go
@@ -1,9 +1,11 @@
package main
import (
+ "bytes"
"context"
"fmt"
"io/ioutil"
+ "math/rand"
"net/http"
"os"
"time"
@@ -15,14 +17,25 @@ import (
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/weaveworks/libgitops/cmd/common"
+ "github.com/weaveworks/libgitops/cmd/common/logs"
"github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme"
- "github.com/weaveworks/libgitops/pkg/gitdir"
- "github.com/weaveworks/libgitops/pkg/logs"
+ "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1"
+ "github.com/weaveworks/libgitops/pkg/serializer"
"github.com/weaveworks/libgitops/pkg/storage"
- "github.com/weaveworks/libgitops/pkg/storage/transaction"
- githubpr "github.com/weaveworks/libgitops/pkg/storage/transaction/pullrequest/github"
- "github.com/weaveworks/libgitops/pkg/storage/watch"
- "github.com/weaveworks/libgitops/pkg/storage/watch/update"
+ "github.com/weaveworks/libgitops/pkg/storage/backend"
+ "github.com/weaveworks/libgitops/pkg/storage/client"
+ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
+ "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed"
+ "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed/git"
+ githubpr "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed/git/github"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "github.com/weaveworks/libgitops/pkg/storage/event"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
+ unstructuredfs "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured"
+ unstructuredevent "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/event"
+ unstructuredtx "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/tx"
+ "github.com/weaveworks/libgitops/pkg/storage/kube"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
var (
@@ -30,8 +43,9 @@ var (
authorNameFlag = pflag.String("author-name", defaultAuthorName, "Author name for Git commits")
authorEmailFlag = pflag.String("author-email", defaultAuthorEmail, "Author email for Git commits")
gitURLFlag = pflag.String("git-url", "", "HTTPS Git URL; where the Git repository is, e.g. https://github.com/luxas/ignite-gitops")
- prAssigneeFlag = pflag.StringSlice("pr-assignees", nil, "What user logins to assign for the created PR. The user must have pull access to the repo.")
prMilestoneFlag = pflag.String("pr-milestone", "", "What milestone to tag the PR with")
+ prAssigneesFlag = pflag.StringSlice("pr-assignees", nil, "What user logins to assign for the created PR. The user must have pull access to the repo.")
+ prLabelsFlag = pflag.StringSlice("pr-labels", nil, "What labels to apply on the created PR. The labels must already exist. E.g. \"user/bot,actuator/libgitops,kind/status-update\"")
)
const (
@@ -46,7 +60,16 @@ func main() {
common.ParseVersionFlag()
// Run the application
- if err := run(*identityFlag, *gitURLFlag, os.Getenv("GITHUB_TOKEN"), *authorNameFlag, *authorEmailFlag); err != nil {
+ if err := run(
+ *identityFlag,
+ *gitURLFlag,
+ os.Getenv("GITHUB_TOKEN"),
+ *authorNameFlag,
+ *authorEmailFlag,
+ *prMilestoneFlag,
+ *prAssigneesFlag,
+ *prLabelsFlag,
+ ); err != nil {
fmt.Println(err)
os.Exit(1)
}
@@ -60,7 +83,8 @@ func expandAndRead(filePath string) ([]byte, error) {
return ioutil.ReadFile(expandedPath)
}
-func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error {
+func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone string,
+ prAssignees, prLabels []string) error {
// Validate parameters
if len(identityFile) == 0 {
return fmt.Errorf("--identity-file is required")
@@ -69,7 +93,7 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error {
return fmt.Errorf("--git-url is required")
}
if len(ghToken) == 0 {
- return fmt.Errorf("--github-token is required")
+ return fmt.Errorf("GITHUB_TOKEN is required")
}
if len(authorName) == 0 {
return fmt.Errorf("--author-name is required")
@@ -78,6 +102,9 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error {
return fmt.Errorf("--author-email is required")
}
+ // Set the log level
+ logs.Logger.SetLevel(logrus.TraceLevel)
+
// Read the identity and known_hosts files
identityContent, err := expandAndRead(identityFile)
if err != nil {
@@ -101,58 +128,112 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error {
}
// Authenticate to the GitDirectory using Git SSH
- authMethod, err := gitdir.NewSSHAuthMethod(identityContent, knownHostsContent)
+ authMethod, err := git.NewSSHAuthMethod(identityContent, knownHostsContent)
if err != nil {
return err
}
- // Construct the GitDirectory implementation which backs the storage
- gitDir, err := gitdir.NewGitDirectory(repoRef, gitdir.GitDirectoryOptions{
- Branch: "master",
- Interval: 10 * time.Second,
- AuthMethod: authMethod,
- })
+ ctx, cancel := context.WithCancel(context.Background())
+
+ defer cancel()
+
+ // Construct the LocalClone implementation which backs the storage
+ localClone, err := git.NewLocalClone(ctx, repoRef, authMethod, git.Branch("master"))
if err != nil {
return err
}
- // Create a new PR provider for the GitStorage
- prProvider, err := githubpr.NewGitHubPRProvider(ghClient)
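+ // Resolve client operations against the clone's main branch by default.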
+ ctx = core.WithMutableVersionRef(ctx, localClone.MainBranch())
+
+ // Just use default encoders and decoders
+ encoder := scheme.Serializer.Encoder()
+ decoder := scheme.Serializer.Decoder()
+
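+ // Track the manifests in the local clone and emit object-level events
+ // as the underlying files change.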
+ rawManifest, err := unstructuredevent.NewManifest(
+ localClone.Dir(),
+ filesystem.DefaultContentTyper,
+ storage.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects are root-scoped (non-namespaced)
+ unstructuredfs.KubeObjectRecognizer{Decoder: decoder},
+ filesystem.DefaultPathExcluders(),
+ )
if err != nil {
return err
}
- // Create a new GitStorage using the GitDirectory, PR provider, and Serializer
- gitStorage, err := transaction.NewGitStorage(gitDir, prProvider, scheme.Serializer)
+
+ // Create the channel to receive events to, and register it with the EventStorage
+ updates := make(event.ObjectEventStream, 4096)
+ if err := rawManifest.WatchForObjectEvents(ctx, updates); err != nil {
+ return err
+ }
+
+ defer func() { _ = rawManifest.Close() }()
+
+ // Use the version information in the scheme to determine the storage version
+ versioner := backend.SchemePreferredVersioner{Scheme: scheme.Scheme}
+
+ b, err := backend.NewGeneric(rawManifest, encoder, decoder, kube.NewNamespaceEnforcer(), versioner, nil)
if err != nil {
return err
}
- // Set the log level
- logs.Logger.SetLevel(logrus.InfoLevel)
+ gitClient, err := client.NewGeneric(b)
+ if err != nil {
+ return err
+ }
- watchStorage, err := watch.NewManifestStorage(gitDir.Dir(), scheme.Serializer)
+ txGeneralClient, err := transactional.NewGeneric(gitClient, localClone, nil)
if err != nil {
return err
}
- defer func() { _ = watchStorage.Close() }()
- updates := make(chan update.Update, 4096)
- watchStorage.SetUpdateStream(updates)
+ // Note: This will add itself to the Commit/TxHook chains on the localClone.
+ txClient, err := distributed.NewClient(txGeneralClient, localClone)
+ if err != nil {
+ return err
+ }
+
+ // Register a tx hook so that a new copy-on-write overlay is created when transactions are made
+ versionRefHook := unstructuredtx.NewUnstructuredStorageTxHandler(gitClient)
+ txClient.TransactionHookChain().Register(versionRefHook)
+
+ // Create a new CommitHook for sending PRs
+ prCommitHook, err := githubpr.NewGitHubPRCommitHandler(ghClient, localClone.RepositoryRef())
+ if err != nil {
+ return err
+ }
+
+ // Register the PR CommitHook with the distributed transaction Client.
+ // This must be done after the distributed.NewClient call, so that the
+ // branch has already been pushed by the time the PR is created.
+ txClient.CommitHookChain().Register(prCommitHook)
+
+ // Start the sync loop in the background
+ txClient.StartResyncLoop(ctx, 15*time.Second)
go func() {
for upd := range updates {
- logrus.Infof("Got %s update for: %v %v", upd.Event, upd.PartialObject.GetObjectKind().GroupVersionKind(), upd.PartialObject.GetObjectMeta())
+ logrus.Infof("Got %s update for: %v %v", upd.Type, upd.ID.GroupKind(), upd.ID.ObjectKey())
}
}()
e := common.NewEcho()
e.GET("/git/", func(c echo.Context) error {
- objs, err := gitStorage.List(storage.NewKindKey(common.CarGVK))
- if err != nil {
+ list := &unstructured.UnstructuredList{}
+ list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList"))
+
+ /*if br := c.QueryParam("branch"); len(br) != 0 {
+ ctx = core.WithVersionRef(ctx, core.NewMutableVersionRef(br))
+ }*/
+
+ if err := txClient.List(ctx, list); err != nil {
+ return err
+ }
+ var content bytes.Buffer
+ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil {
return err
}
- return c.JSON(http.StatusOK, objs)
+ return c.JSONBlob(http.StatusOK, content.Bytes())
})
e.PUT("/git/:name", func(c echo.Context) error {
@@ -161,26 +242,38 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error {
return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
}
- objKey := common.CarKeyForName(name)
- err := gitStorage.Transaction(context.Background(), fmt.Sprintf("%s-update-", name), func(ctx context.Context, s storage.Storage) (transaction.CommitResult, error) {
-
- // Update the status of the car
- if err := common.SetNewCarStatus(s, objKey); err != nil {
- return nil, err
- }
+ // Create an empty typed object, the data from the client will be written into it
+ // at .Get-time below.
+ car := v1alpha1.Car{}
+ carKey := core.ObjectKey{Name: name}
+ // The head branch name is derived from the Car's name and ends in a "-",
+ // which makes the TxClient append a random SHA suffix.
+ headBranch := fmt.Sprintf("%s-update-", name)
- return &transaction.GenericPullRequestResult{
- CommitResult: &transaction.GenericCommitResult{
- AuthorName: authorName,
- AuthorEmail: authorEmail,
- Title: "Update Car speed",
- Description: "We really need to sync this state!",
+ err := txClient.
+ BranchTransaction(ctx, headBranch). // Start a transaction of the base branch to the head
+ Get(carKey, &car). // Load the latest data of the Car into &car.
+ Custom(func(ctx context.Context) error { // Mutate (update) status of the Car
+ car.Status.Distance = rand.Uint64()
+ car.Status.Speed = rand.Float64() * 100
+ return nil
+ }).
+ Update(&car). // Store the changed car in the Storage
+ CreateTx(githubpr.GenericPullRequest{ // Create a commit for the tx; the PR request is a superset of a plain commit
+ Commit: transactional.GenericCommit{
+ Author: transactional.GenericCommitAuthor{
+ Name: authorName,
+ Email: authorEmail,
+ },
+ Message: transactional.GenericCommitMessage{
+ Title: "Update Car speed",
+ Description: "We really need to sync this state!",
+ },
},
- Labels: []string{"user/bot", "actuator/libgitops", "kind/status-update"},
- Assignees: *prAssigneeFlag,
- Milestone: *prMilestoneFlag,
- }, nil
- })
+ Labels: prLabels,
+ Assignees: prAssignees,
+ Milestone: prMilestone,
+ }).Error()
if err != nil {
return err
}
diff --git a/cmd/sample-watch/main.go b/cmd/sample-watch/main.go
index ef1aec0a..474876f8 100644
--- a/cmd/sample-watch/main.go
+++ b/cmd/sample-watch/main.go
@@ -2,6 +2,7 @@ package main
import (
"bytes"
+ "context"
"fmt"
"net/http"
"os"
@@ -10,11 +11,19 @@ import (
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/weaveworks/libgitops/cmd/common"
+ "github.com/weaveworks/libgitops/cmd/common/logs"
"github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme"
- "github.com/weaveworks/libgitops/pkg/logs"
+ "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1"
"github.com/weaveworks/libgitops/pkg/serializer"
- "github.com/weaveworks/libgitops/pkg/storage/watch"
- "github.com/weaveworks/libgitops/pkg/storage/watch/update"
+ "github.com/weaveworks/libgitops/pkg/storage"
+ "github.com/weaveworks/libgitops/pkg/storage/backend"
+ "github.com/weaveworks/libgitops/pkg/storage/client"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "github.com/weaveworks/libgitops/pkg/storage/event"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured"
+ unstructuredevent "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/event"
+ "github.com/weaveworks/libgitops/pkg/storage/kube"
)
var watchDirFlag = pflag.String("watch-dir", "/tmp/libgitops/watch", "Where to watch for YAML/JSON manifests")
@@ -24,33 +33,62 @@ func main() {
common.ParseVersionFlag()
// Run the application
- if err := run(); err != nil {
+ if err := run(*watchDirFlag); err != nil {
fmt.Println(err)
os.Exit(1)
}
}
-func run() error {
+func run(watchDir string) error {
// Create the watch directory
- if err := os.MkdirAll(*watchDirFlag, 0755); err != nil {
+ if err := os.MkdirAll(watchDir, 0755); err != nil {
return err
}
// Set the log level
- logs.Logger.SetLevel(logrus.InfoLevel)
+ logs.Logger.SetLevel(logrus.TraceLevel)
- watchStorage, err := watch.NewManifestStorage(*watchDirFlag, scheme.Serializer)
+ ctx := context.Background()
+
+ // Just use default encoders and decoders
+ encoder := scheme.Serializer.Encoder()
+ decoder := scheme.Serializer.Decoder()
+
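+ // Watch watchDir for YAML/JSON manifests and emit object-level events
+ // as the underlying files change.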
+ rawManifest, err := unstructuredevent.NewManifest(
+ watchDir,
+ filesystem.DefaultContentTyper,
+ storage.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects are root-scoped (non-namespaced)
+ unstructured.KubeObjectRecognizer{Decoder: decoder},
+ filesystem.DefaultPathExcluders(),
+ )
+ if err != nil {
+ return err
+ }
+
+ // Create the channel to receive events to, and register it with the EventStorage
+ updates := make(event.ObjectEventStream, 4096)
+ if err := rawManifest.WatchForObjectEvents(ctx, updates); err != nil {
+ return err
+ }
+
+ // Use the version information in the scheme to determine the storage version
+ versioner := backend.SchemePreferredVersioner{Scheme: scheme.Scheme}
+
+ b, err := backend.NewGeneric(rawManifest, encoder, decoder, kube.NewNamespaceEnforcer(), versioner, nil)
+ if err != nil {
+ return err
+ }
+
+ watchStorage, err := client.NewGeneric(b)
if err != nil {
return err
}
- defer func() { _ = watchStorage.Close() }()
- updates := make(chan update.Update, 4096)
- watchStorage.SetUpdateStream(updates)
+ defer func() { _ = rawManifest.Close() }()
go func() {
for upd := range updates {
- logrus.Infof("Got %s update for: %v %v", upd.Event, upd.PartialObject.GetObjectKind().GroupVersionKind(), upd.PartialObject.GetObjectMeta())
+ logrus.Infof("Got %s update for: %v %v", upd.Type, upd.ID.GroupKind(), upd.ID.ObjectKey())
}
}()
@@ -62,7 +100,8 @@ func run() error {
return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
}
- obj, err := watchStorage.Get(common.CarKeyForName(name))
+ obj := &v1alpha1.Car{}
+ err := watchStorage.Get(ctx, core.ObjectKey{Name: name}, obj)
if err != nil {
return err
}
@@ -79,7 +118,7 @@ func run() error {
return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
}
- if err := common.SetNewCarStatus(watchStorage, common.CarKeyForName(name)); err != nil {
+ if err := common.SetNewCarStatus(ctx, watchStorage, name); err != nil {
return err
}
return c.String(200, "OK!")
diff --git a/foo.diff b/foo.diff
new file mode 100644
index 00000000..d63a4c49
--- /dev/null
+++ b/foo.diff
@@ -0,0 +1,14208 @@
+diff --git a/Makefile b/Makefile
+index 4f3230b..1f2c88b 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,5 +1,5 @@
+ UID_GID ?= $(shell id -u):$(shell id -g)
+-GO_VERSION ?= 1.14.4
++GO_VERSION ?= 1.15.6
+ GIT_VERSION := $(shell hack/ldflags.sh --version-only)
+ PROJECT := github.com/weaveworks/libgitops
+ BOUNDING_API_DIRS := ${PROJECT}/cmd/apis/sample
+@@ -7,7 +7,6 @@ API_DIRS := ${PROJECT}/cmd/sample-app/apis/sample,${PROJECT}/cmd/sample-app/apis
+ SRC_PKGS := cmd pkg
+ DOCKER_ARGS := --rm
+ CACHE_DIR := $(shell pwd)/bin/cache
+-API_DOCS := api/sample-app.md api/runtime.md
+ BINARIES := bin/sample-app bin/sample-gitops bin/sample-watch
+
+ # If we're not running in CI, run Docker interactively
+@@ -39,7 +38,6 @@ test-internal:
+ tidy: docker-tidy-internal
+ tidy-internal: /go/bin/goimports
+ go mod tidy
+- hack/generate-client.sh
+ gofmt -s -w ${SRC_PKGS}
+ goimports -w ${SRC_PKGS}
+
+diff --git a/cmd/common/common.go b/cmd/common/common.go
+index dcba7c6..f011dac 100644
+--- a/cmd/common/common.go
++++ b/cmd/common/common.go
+@@ -13,8 +13,8 @@ import (
+ "github.com/spf13/pflag"
+ "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1"
+ "github.com/weaveworks/libgitops/cmd/sample-app/version"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/storage"
++ "github.com/weaveworks/libgitops/pkg/storage/client"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
+ )
+
+ var (
+@@ -25,10 +25,6 @@ func init() {
+ rand.Seed(time.Now().UnixNano())
+ }
+
+-func CarKeyForName(name string) storage.ObjectKey {
+- return storage.NewObjectKey(storage.NewKindKey(CarGVK), runtime.NewIdentifier("default/"+name))
+-}
+-
+ func NewCar(name string) *v1alpha1.Car {
+ obj := &v1alpha1.Car{}
+ obj.Name = name
+@@ -38,17 +34,17 @@ func NewCar(name string) *v1alpha1.Car {
+ return obj
+ }
+
+-func SetNewCarStatus(s storage.Storage, key storage.ObjectKey) error {
+- obj, err := s.Get(key)
++func SetNewCarStatus(ctx context.Context, c client.Client, name string) error {
++ car := &v1alpha1.Car{}
++ err := c.Get(ctx, core.ObjectKey{Name: name}, car)
+ if err != nil {
+ return err
+ }
+
+- car := obj.(*v1alpha1.Car)
+ car.Status.Distance = rand.Uint64()
+ car.Status.Speed = rand.Float64() * 100
+
+- return s.Update(car)
++ return c.Update(ctx, car)
+ }
+
+ func ParseVersionFlag() {
+@@ -75,8 +71,8 @@ func NewEcho() *echo.Echo {
+ func StartEcho(e *echo.Echo) error {
+ // Start the server
+ go func() {
+- if err := e.Start(":8888"); err != nil {
+- e.Logger.Info("shutting down the server")
++ if err := e.Start(":8881"); err != nil {
++ e.Logger.Info("shutting down the server", err)
+ }
+ }()
+
+diff --git a/pkg/logs/flag/flag.go b/cmd/common/logs/flag/flag.go
+similarity index 84%
+rename from pkg/logs/flag/flag.go
+rename to cmd/common/logs/flag/flag.go
+index 3c226cf..83f5967 100644
+--- a/pkg/logs/flag/flag.go
++++ b/cmd/common/logs/flag/flag.go
+@@ -5,6 +5,9 @@ import (
+ "github.com/spf13/pflag"
+ )
+
++// TODO: Use these flags in the sample binaries?
++// TODO: Move to the way controller-runtime does logs instead?
++
+ type LogLevelFlag struct {
+ value *logrus.Level
+ }
+diff --git a/pkg/logs/logs.go b/cmd/common/logs/logs.go
+similarity index 95%
+rename from pkg/logs/logs.go
+rename to cmd/common/logs/logs.go
+index 1ca78f1..c5b11a8 100644
+--- a/pkg/logs/logs.go
++++ b/cmd/common/logs/logs.go
+@@ -8,6 +8,8 @@ import (
+ log "github.com/sirupsen/logrus"
+ )
+
++// TODO: Move to the way controller-runtime does logs instead?
++
+ // Quiet specifies whether to only print machine-readable IDs
+ var Quiet bool
+
+diff --git a/cmd/sample-app/client/client.go b/cmd/sample-app/client/client.go
+deleted file mode 100644
+index e4d9824..0000000
+--- a/cmd/sample-app/client/client.go
++++ /dev/null
+@@ -1,61 +0,0 @@
+-// TODO: Docs
+-
+-// +build ignore
+-
+-package client
+-
+-import (
+- api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample"
+- "github.com/weaveworks/libgitops/pkg/client"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/storage"
+-
+- "k8s.io/apimachinery/pkg/runtime/schema"
+-)
+-
+-// TODO: Autogenerate this!
+-
+-// NewClient creates a client for the specified storage
+-func NewClient(s storage.Storage) *Client {
+- return &Client{
+- SampleInternalClient: NewSampleInternalClient(s),
+- }
+-}
+-
+-// Client is a struct providing high-level access to objects in a storage
+-// The resource-specific client interfaces are automatically generated based
+-// off client_resource_template.go. The auto-generation can be done with hack/client.sh
+-// At the moment SampleInternalClient is the default client. If more than this client
+-// is created in the future, the SampleInternalClient will be accessible under
+-// Client.SampleInternal() instead.
+-type Client struct {
+- *SampleInternalClient
+-}
+-
+-func NewSampleInternalClient(s storage.Storage) *SampleInternalClient {
+- return &SampleInternalClient{
+- storage: s,
+- dynamicClients: map[schema.GroupVersionKind]client.DynamicClient{},
+- gv: api.SchemeGroupVersion,
+- }
+-}
+-
+-type SampleInternalClient struct {
+- storage storage.Storage
+- gv schema.GroupVersion
+- carClient CarClient
+- motorcycleClient MotorcycleClient
+- dynamicClients map[schema.GroupVersionKind]client.DynamicClient
+-}
+-
+-// Dynamic returns the DynamicClient for the Client instance, for the specific kind
+-func (c *SampleInternalClient) Dynamic(kind runtime.Kind) (dc client.DynamicClient) {
+- var ok bool
+- gvk := c.gv.WithKind(kind.Title())
+- if dc, ok = c.dynamicClients[gvk]; !ok {
+- dc = client.NewDynamicClient(c.storage, gvk)
+- c.dynamicClients[gvk] = dc
+- }
+-
+- return
+-}
+diff --git a/cmd/sample-app/client/zz_generated.client_car.go b/cmd/sample-app/client/zz_generated.client_car.go
+deleted file mode 100644
+index 2661d45..0000000
+--- a/cmd/sample-app/client/zz_generated.client_car.go
++++ /dev/null
+@@ -1,152 +0,0 @@
+-// +build ignore
+-
+-/*
+- Note: This file is autogenerated! Do not edit it manually!
+- Edit client_car_template.go instead, and run
+- hack/generate-client.sh afterwards.
+-*/
+-
+-package client
+-
+-import (
+- "fmt"
+-
+- api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample"
+-
+- log "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/storage"
+- "github.com/weaveworks/libgitops/pkg/storage/filterer"
+- "k8s.io/apimachinery/pkg/runtime/schema"
+-)
+-
+-// CarClient is an interface for accessing Car-specific API objects
+-type CarClient interface {
+- // New returns a new Car
+- New() *api.Car
+- // Get returns the Car matching given UID from the storage
+- Get(runtime.UID) (*api.Car, error)
+- // Set saves the given Car into persistent storage
+- Set(*api.Car) error
+- // Patch performs a strategic merge patch on the object with
+- // the given UID, using the byte-encoded patch given
+- Patch(runtime.UID, []byte) error
+- // Find returns the Car matching the given filter, filters can
+- // match e.g. the Object's Name, UID or a specific property
+- Find(filter filterer.BaseFilter) (*api.Car, error)
+- // FindAll returns multiple Cars matching the given filter, filters can
+- // match e.g. the Object's Name, UID or a specific property
+- FindAll(filter filterer.BaseFilter) ([]*api.Car, error)
+- // Delete deletes the Car with the given UID from the storage
+- Delete(uid runtime.UID) error
+- // List returns a list of all Cars available
+- List() ([]*api.Car, error)
+-}
+-
+-// Cars returns the CarClient for the Client object
+-func (c *SampleInternalClient) Cars() CarClient {
+- if c.carClient == nil {
+- c.carClient = newCarClient(c.storage, c.gv)
+- }
+-
+- return c.carClient
+-}
+-
+-// carClient is a struct implementing the CarClient interface
+-// It uses a shared storage instance passed from the Client together with its own Filterer
+-type carClient struct {
+- storage storage.Storage
+- filterer *filterer.Filterer
+- gvk schema.GroupVersionKind
+-}
+-
+-// newCarClient builds the carClient struct using the storage implementation and a new Filterer
+-func newCarClient(s storage.Storage, gv schema.GroupVersion) CarClient {
+- return &carClient{
+- storage: s,
+- filterer: filterer.NewFilterer(s),
+- gvk: gv.WithKind(api.KindCar.Title()),
+- }
+-}
+-
+-// New returns a new Object of its kind
+-func (c *carClient) New() *api.Car {
+- log.Tracef("Client.New; GVK: %v", c.gvk)
+- obj, err := c.storage.New(c.gvk)
+- if err != nil {
+- panic(fmt.Sprintf("Client.New must not return an error: %v", err))
+- }
+- return obj.(*api.Car)
+-}
+-
+-// Find returns a single Car based on the given Filter
+-func (c *carClient) Find(filter filterer.BaseFilter) (*api.Car, error) {
+- log.Tracef("Client.Find; GVK: %v", c.gvk)
+- object, err := c.filterer.Find(c.gvk, filter)
+- if err != nil {
+- return nil, err
+- }
+-
+- return object.(*api.Car), nil
+-}
+-
+-// FindAll returns multiple Cars based on the given Filter
+-func (c *carClient) FindAll(filter filterer.BaseFilter) ([]*api.Car, error) {
+- log.Tracef("Client.FindAll; GVK: %v", c.gvk)
+- matches, err := c.filterer.FindAll(c.gvk, filter)
+- if err != nil {
+- return nil, err
+- }
+-
+- results := make([]*api.Car, 0, len(matches))
+- for _, item := range matches {
+- results = append(results, item.(*api.Car))
+- }
+-
+- return results, nil
+-}
+-
+-// Get returns the Car matching given UID from the storage
+-func (c *carClient) Get(uid runtime.UID) (*api.Car, error) {
+- log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk)
+- object, err := c.storage.Get(c.gvk, uid)
+- if err != nil {
+- return nil, err
+- }
+-
+- return object.(*api.Car), nil
+-}
+-
+-// Set saves the given Car into the persistent storage
+-func (c *carClient) Set(car *api.Car) error {
+- log.Tracef("Client.Set; UID: %q, GVK: %v", car.GetUID(), c.gvk)
+- return c.storage.Set(c.gvk, car)
+-}
+-
+-// Patch performs a strategic merge patch on the object with
+-// the given UID, using the byte-encoded patch given
+-func (c *carClient) Patch(uid runtime.UID, patch []byte) error {
+- return c.storage.Patch(c.gvk, uid, patch)
+-}
+-
+-// Delete deletes the Car from the storage
+-func (c *carClient) Delete(uid runtime.UID) error {
+- log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk)
+- return c.storage.Delete(c.gvk, uid)
+-}
+-
+-// List returns a list of all Cars available
+-func (c *carClient) List() ([]*api.Car, error) {
+- log.Tracef("Client.List; GVK: %v", c.gvk)
+- list, err := c.storage.List(c.gvk)
+- if err != nil {
+- return nil, err
+- }
+-
+- results := make([]*api.Car, 0, len(list))
+- for _, item := range list {
+- results = append(results, item.(*api.Car))
+- }
+-
+- return results, nil
+-}
+diff --git a/cmd/sample-app/client/zz_generated.client_motorcycle.go b/cmd/sample-app/client/zz_generated.client_motorcycle.go
+deleted file mode 100644
+index 7256e00..0000000
+--- a/cmd/sample-app/client/zz_generated.client_motorcycle.go
++++ /dev/null
+@@ -1,152 +0,0 @@
+-// +build ignore
+-
+-/*
+- Note: This file is autogenerated! Do not edit it manually!
+- Edit client_motorcycle_template.go instead, and run
+- hack/generate-client.sh afterwards.
+-*/
+-
+-package client
+-
+-import (
+- "fmt"
+-
+- api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample"
+-
+- log "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/storage"
+- "github.com/weaveworks/libgitops/pkg/storage/filterer"
+- "k8s.io/apimachinery/pkg/runtime/schema"
+-)
+-
+-// MotorcycleClient is an interface for accessing Motorcycle-specific API objects
+-type MotorcycleClient interface {
+- // New returns a new Motorcycle
+- New() *api.Motorcycle
+- // Get returns the Motorcycle matching given UID from the storage
+- Get(runtime.UID) (*api.Motorcycle, error)
+- // Set saves the given Motorcycle into persistent storage
+- Set(*api.Motorcycle) error
+- // Patch performs a strategic merge patch on the object with
+- // the given UID, using the byte-encoded patch given
+- Patch(runtime.UID, []byte) error
+- // Find returns the Motorcycle matching the given filter, filters can
+- // match e.g. the Object's Name, UID or a specific property
+- Find(filter filterer.BaseFilter) (*api.Motorcycle, error)
+- // FindAll returns multiple Motorcycles matching the given filter, filters can
+- // match e.g. the Object's Name, UID or a specific property
+- FindAll(filter filterer.BaseFilter) ([]*api.Motorcycle, error)
+- // Delete deletes the Motorcycle with the given UID from the storage
+- Delete(uid runtime.UID) error
+- // List returns a list of all Motorcycles available
+- List() ([]*api.Motorcycle, error)
+-}
+-
+-// Motorcycles returns the MotorcycleClient for the Client object
+-func (c *SampleInternalClient) Motorcycles() MotorcycleClient {
+- if c.motorcycleClient == nil {
+- c.motorcycleClient = newMotorcycleClient(c.storage, c.gv)
+- }
+-
+- return c.motorcycleClient
+-}
+-
+-// motorcycleClient is a struct implementing the MotorcycleClient interface
+-// It uses a shared storage instance passed from the Client together with its own Filterer
+-type motorcycleClient struct {
+- storage storage.Storage
+- filterer *filterer.Filterer
+- gvk schema.GroupVersionKind
+-}
+-
+-// newMotorcycleClient builds the motorcycleClient struct using the storage implementation and a new Filterer
+-func newMotorcycleClient(s storage.Storage, gv schema.GroupVersion) MotorcycleClient {
+- return &motorcycleClient{
+- storage: s,
+- filterer: filterer.NewFilterer(s),
+- gvk: gv.WithKind(api.KindMotorcycle.Title()),
+- }
+-}
+-
+-// New returns a new Object of its kind
+-func (c *motorcycleClient) New() *api.Motorcycle {
+- log.Tracef("Client.New; GVK: %v", c.gvk)
+- obj, err := c.storage.New(c.gvk)
+- if err != nil {
+- panic(fmt.Sprintf("Client.New must not return an error: %v", err))
+- }
+- return obj.(*api.Motorcycle)
+-}
+-
+-// Find returns a single Motorcycle based on the given Filter
+-func (c *motorcycleClient) Find(filter filterer.BaseFilter) (*api.Motorcycle, error) {
+- log.Tracef("Client.Find; GVK: %v", c.gvk)
+- object, err := c.filterer.Find(c.gvk, filter)
+- if err != nil {
+- return nil, err
+- }
+-
+- return object.(*api.Motorcycle), nil
+-}
+-
+-// FindAll returns multiple Motorcycles based on the given Filter
+-func (c *motorcycleClient) FindAll(filter filterer.BaseFilter) ([]*api.Motorcycle, error) {
+- log.Tracef("Client.FindAll; GVK: %v", c.gvk)
+- matches, err := c.filterer.FindAll(c.gvk, filter)
+- if err != nil {
+- return nil, err
+- }
+-
+- results := make([]*api.Motorcycle, 0, len(matches))
+- for _, item := range matches {
+- results = append(results, item.(*api.Motorcycle))
+- }
+-
+- return results, nil
+-}
+-
+-// Get returns the Motorcycle matching given UID from the storage
+-func (c *motorcycleClient) Get(uid runtime.UID) (*api.Motorcycle, error) {
+- log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk)
+- object, err := c.storage.Get(c.gvk, uid)
+- if err != nil {
+- return nil, err
+- }
+-
+- return object.(*api.Motorcycle), nil
+-}
+-
+-// Set saves the given Motorcycle into the persistent storage
+-func (c *motorcycleClient) Set(motorcycle *api.Motorcycle) error {
+- log.Tracef("Client.Set; UID: %q, GVK: %v", motorcycle.GetUID(), c.gvk)
+- return c.storage.Set(c.gvk, motorcycle)
+-}
+-
+-// Patch performs a strategic merge patch on the object with
+-// the given UID, using the byte-encoded patch given
+-func (c *motorcycleClient) Patch(uid runtime.UID, patch []byte) error {
+- return c.storage.Patch(c.gvk, uid, patch)
+-}
+-
+-// Delete deletes the Motorcycle from the storage
+-func (c *motorcycleClient) Delete(uid runtime.UID) error {
+- log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk)
+- return c.storage.Delete(c.gvk, uid)
+-}
+-
+-// List returns a list of all Motorcycles available
+-func (c *motorcycleClient) List() ([]*api.Motorcycle, error) {
+- log.Tracef("Client.List; GVK: %v", c.gvk)
+- list, err := c.storage.List(c.gvk)
+- if err != nil {
+- return nil, err
+- }
+-
+- results := make([]*api.Motorcycle, 0, len(list))
+- for _, item := range list {
+- results = append(results, item.(*api.Motorcycle))
+- }
+-
+- return results, nil
+-}
+diff --git a/cmd/sample-app/main.go b/cmd/sample-app/main.go
+index ea119a9..2812acc 100644
+--- a/cmd/sample-app/main.go
++++ b/cmd/sample-app/main.go
+@@ -2,7 +2,10 @@ package main
+
+ import (
+ "bytes"
++ "context"
++ "encoding/json"
+ "fmt"
++ "io/ioutil"
+ "net/http"
+ "os"
+
+@@ -10,12 +13,19 @@ import (
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/pflag"
+ "github.com/weaveworks/libgitops/cmd/common"
++ "github.com/weaveworks/libgitops/cmd/common/logs"
+ "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme"
+ "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1"
+- "github.com/weaveworks/libgitops/pkg/logs"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+ "github.com/weaveworks/libgitops/pkg/serializer"
+- "github.com/weaveworks/libgitops/pkg/storage"
++ "github.com/weaveworks/libgitops/pkg/storage/backend"
++ "github.com/weaveworks/libgitops/pkg/storage/client"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
++ "github.com/weaveworks/libgitops/pkg/storage/kube"
++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
++ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
++ "k8s.io/apimachinery/pkg/types"
++ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client"
+ )
+
+ var manifestDirFlag = pflag.String("data-dir", "/tmp/libgitops/manifest", "Where to store the YAML files")
+@@ -25,27 +35,43 @@ func main() {
+ common.ParseVersionFlag()
+
+ // Run the application
+- if err := run(); err != nil {
++ if err := run(*manifestDirFlag); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ }
+
+-func run() error {
++func run(manifestDir string) error {
++ ctx := context.Background()
+ // Create the manifest directory
+- if err := os.MkdirAll(*manifestDirFlag, 0755); err != nil {
++ if err := os.MkdirAll(manifestDir, 0755); err != nil {
+ return err
+ }
+
+ // Set the log level
+ logs.Logger.SetLevel(logrus.InfoLevel)
+
+- plainStorage := storage.NewGenericStorage(
+- storage.NewGenericRawStorage(*manifestDirFlag, v1alpha1.SchemeGroupVersion, serializer.ContentTypeYAML),
+- scheme.Serializer,
+- []runtime.IdentifierFactory{runtime.Metav1NameIdentifier},
++ s, err := filesystem.NewSimpleStorage(
++ manifestDir,
++ core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced
++ filesystem.SimpleFileFinderOptions{
++ DisableGroupDirectory: true,
++ ContentType: serializer.ContentTypeYAML,
++ },
+ )
+- defer func() { _ = plainStorage.Close() }()
++ if err != nil {
++ return err
++ }
++
++ b, err := backend.NewGeneric(s, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil)
++ if err != nil {
++ return err
++ }
++
++ plainClient, err := client.NewGeneric(b, scheme.Serializer.Patcher())
++ if err != nil {
++ return err
++ }
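++
++ // Rough layering as wired above: filesystem storage -> generic backend
++ // (serialization + namespace enforcement) -> generic client exposing
++ // typed Get/List/Create/Update/Patch.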
+
+ e := common.NewEcho()
+
+@@ -55,7 +81,8 @@ func run() error {
+ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
+ }
+
+- obj, err := plainStorage.Get(common.CarKeyForName(name))
++ obj := &v1alpha1.Car{}
++ err := plainClient.Get(ctx, core.ObjectKey{Name: name}, obj)
+ if err != nil {
+ return err
+ }
+@@ -66,13 +93,92 @@ func run() error {
+ return c.JSONBlob(http.StatusOK, content.Bytes())
+ })
+
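++ // The /meta/ endpoints below read objects as metav1.PartialObjectMetadata
++ // (and its List form), which carry only TypeMeta and ObjectMeta, so the
++ // response contains object metadata without the full spec/status.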
++ e.GET("/meta/", func(c echo.Context) error {
++ list := &metav1.PartialObjectMetadataList{}
++ list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList"))
++ err := plainClient.List(ctx, list)
++ if err != nil {
++ return err
++ }
++ var content bytes.Buffer
++ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil {
++ return err
++ }
++ return c.JSONBlob(http.StatusOK, content.Bytes())
++ })
++
++ e.GET("/meta/:name", func(c echo.Context) error {
++ name := c.Param("name")
++ if len(name) == 0 {
++ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
++ }
++
++ obj := &metav1.PartialObjectMetadata{}
++ obj.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("Car"))
++ err := plainClient.Get(ctx, core.ObjectKey{
++ Name: name,
++ }, obj)
++ if err != nil {
++ return err
++ }
++ var content bytes.Buffer
++ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), obj); err != nil {
++ return err
++ }
++ return c.JSONBlob(http.StatusOK, content.Bytes())
++ })
++
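++ // The /unstructured/ endpoints decode into unstructured.Unstructured(List),
++ // i.e. the full object as an untyped map, without going through the typed API.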
++ e.GET("/unstructured/", func(c echo.Context) error {
++ list := &unstructured.UnstructuredList{}
++ list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList"))
++ err := plainClient.List(ctx, list)
++ if err != nil {
++ return err
++ }
++ var content bytes.Buffer
++ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil {
++ return err
++ }
++ var newcontent bytes.Buffer
++ if err := json.Indent(&newcontent, content.Bytes(), "", " "); err != nil {
++ return err
++ }
++ return c.JSONBlob(http.StatusOK, newcontent.Bytes())
++ })
++
++ e.GET("/unstructured/:name", func(c echo.Context) error {
++ name := c.Param("name")
++ if len(name) == 0 {
++ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
++ }
++
++ obj := &unstructured.Unstructured{}
++ obj.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("Car"))
++ err := plainClient.Get(ctx, core.ObjectKey{
++ Name: name,
++ }, obj)
++ if err != nil {
++ return err
++ }
++ var content bytes.Buffer
++ // For some reason this does not pretty-encode the output, so indent it manually below
++ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), obj); err != nil {
++ return err
++ }
++ var newcontent bytes.Buffer
++ if err := json.Indent(&newcontent, content.Bytes(), "", " "); err != nil {
++ return err
++ }
++ return c.JSONBlob(http.StatusOK, newcontent.Bytes())
++ })
++
+ e.POST("/plain/:name", func(c echo.Context) error {
+ name := c.Param("name")
+ if len(name) == 0 {
+ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
+ }
+
+- if err := plainStorage.Create(common.NewCar(name)); err != nil {
++ if err := plainClient.Create(ctx, common.NewCar(name)); err != nil {
+ return err
+ }
+ return c.String(200, "OK!")
+@@ -84,11 +190,45 @@ func run() error {
+ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
+ }
+
+- if err := common.SetNewCarStatus(plainStorage, common.CarKeyForName(name)); err != nil {
++ if err := common.SetNewCarStatus(ctx, plainClient, name); err != nil {
+ return err
+ }
+ return c.String(200, "OK!")
+ })
+
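++ // PATCH applies the raw request body as an RFC 7386 JSON merge patch
++ // (types.MergePatchType) to the stored Car; e.g. a body such as
++ // {"status":{"speed":42}} would (assuming lower-cased JSON tags on Car)
++ // update only Status.Speed.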
++ e.PATCH("/plain/:name", func(c echo.Context) error {
++ name := c.Param("name")
++ if len(name) == 0 {
++ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
++ }
++
++ body, err := ioutil.ReadAll(c.Request().Body)
++ if err != nil {
++ return err
++ }
++ c.Request().Body.Close()
++
++ car := &v1alpha1.Car{}
++ err = plainClient.Get(ctx, core.ObjectKey{
++ Name: name,
++ }, car)
++ if err != nil {
++ return err
++ }
++
++ if err := plainClient.Patch(ctx, car, ctrlclient.RawPatch(types.MergePatchType, body)); err != nil {
++ return err
++ }
++
++ return c.JSON(200, car)
++ })
++
+ return common.StartEcho(e)
+ }
++
++/*
++type noNamespacesRESTMapper struct{}
++
++func (noNamespacesRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
++ return &meta.RESTMapping{Scope: meta.RESTScopeRoot}, nil
++}*/
+diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go
+index e8c2180..d18a9d5 100644
+--- a/cmd/sample-gitops/main.go
++++ b/cmd/sample-gitops/main.go
+@@ -1,9 +1,11 @@
+ package main
+
+ import (
++ "bytes"
+ "context"
+ "fmt"
+ "io/ioutil"
++ "math/rand"
+ "net/http"
+ "os"
+ "time"
+@@ -15,14 +17,22 @@ import (
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/pflag"
+ "github.com/weaveworks/libgitops/cmd/common"
++ "github.com/weaveworks/libgitops/cmd/common/logs"
+ "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme"
+- "github.com/weaveworks/libgitops/pkg/gitdir"
+- "github.com/weaveworks/libgitops/pkg/logs"
+- "github.com/weaveworks/libgitops/pkg/storage"
+- "github.com/weaveworks/libgitops/pkg/storage/transaction"
+- githubpr "github.com/weaveworks/libgitops/pkg/storage/transaction/pullrequest/github"
+- "github.com/weaveworks/libgitops/pkg/storage/watch"
+- "github.com/weaveworks/libgitops/pkg/storage/watch/update"
++ "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1"
++ "github.com/weaveworks/libgitops/pkg/serializer"
++ "github.com/weaveworks/libgitops/pkg/storage/backend"
++ "github.com/weaveworks/libgitops/pkg/storage/client"
++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed"
++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed/git"
++ githubpr "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed/git/github"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "github.com/weaveworks/libgitops/pkg/storage/event"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
++ unstructuredevent "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/event"
++ "github.com/weaveworks/libgitops/pkg/storage/kube"
++ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ )
+
+ var (
+@@ -30,8 +40,9 @@ var (
+ authorNameFlag = pflag.String("author-name", defaultAuthorName, "Author name for Git commits")
+ authorEmailFlag = pflag.String("author-email", defaultAuthorEmail, "Author email for Git commits")
+ gitURLFlag = pflag.String("git-url", "", "HTTPS Git URL; where the Git repository is, e.g. https://github.com/luxas/ignite-gitops")
+- prAssigneeFlag = pflag.StringSlice("pr-assignees", nil, "What user logins to assign for the created PR. The user must have pull access to the repo.")
+ prMilestoneFlag = pflag.String("pr-milestone", "", "What milestone to tag the PR with")
++ prAssigneesFlag = pflag.StringSlice("pr-assignees", nil, "What user logins to assign for the created PR. The user must have pull access to the repo.")
++ prLabelsFlag = pflag.StringSlice("pr-labels", nil, "What labels to apply on the created PR. The labels must already exist. E.g. \"user/bot,actuator/libgitops,kind/status-update\"")
+ )
+
+ const (
+@@ -46,7 +57,16 @@ func main() {
+ common.ParseVersionFlag()
+
+ // Run the application
+- if err := run(*identityFlag, *gitURLFlag, os.Getenv("GITHUB_TOKEN"), *authorNameFlag, *authorEmailFlag); err != nil {
++ if err := run(
++ *identityFlag,
++ *gitURLFlag,
++ os.Getenv("GITHUB_TOKEN"),
++ *authorNameFlag,
++ *authorEmailFlag,
++ *prMilestoneFlag,
++ *prAssigneesFlag,
++ *prLabelsFlag,
++ ); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+@@ -60,7 +80,8 @@ func expandAndRead(filePath string) ([]byte, error) {
+ return ioutil.ReadFile(expandedPath)
+ }
+
+-func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error {
++func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone string,
++ prAssignees, prLabels []string) error {
+ // Validate parameters
+ if len(identityFile) == 0 {
+ return fmt.Errorf("--identity-file is required")
+@@ -69,7 +90,7 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error {
+ return fmt.Errorf("--git-url is required")
+ }
+ if len(ghToken) == 0 {
+- return fmt.Errorf("--github-token is required")
++ return fmt.Errorf("GITHUB_TOKEN is required")
+ }
+ if len(authorName) == 0 {
+ return fmt.Errorf("--author-name is required")
+@@ -78,6 +99,9 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error {
+ return fmt.Errorf("--author-email is required")
+ }
+
++ // Set the log level
++ logs.Logger.SetLevel(logrus.TraceLevel)
++
+ // Read the identity and known_hosts files
+ identityContent, err := expandAndRead(identityFile)
+ if err != nil {
+@@ -101,58 +125,101 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error {
+ }
+
+ // Authenticate to the GitDirectory using Git SSH
+- authMethod, err := gitdir.NewSSHAuthMethod(identityContent, knownHostsContent)
++ authMethod, err := git.NewSSHAuthMethod(identityContent, knownHostsContent)
+ if err != nil {
+ return err
+ }
+
+- // Construct the GitDirectory implementation which backs the storage
+- gitDir, err := gitdir.NewGitDirectory(repoRef, gitdir.GitDirectoryOptions{
++ ctx, cancel := context.WithCancel(context.Background())
++
++ defer cancel()
++
++ // Construct the LocalClone implementation which backs the storage
++ localClone, err := git.NewLocalClone(ctx, repoRef, git.LocalCloneOptions{
+ Branch: "master",
+- Interval: 10 * time.Second,
+ AuthMethod: authMethod,
+ })
+ if err != nil {
+ return err
+ }
+
+- // Create a new PR provider for the GitStorage
+- prProvider, err := githubpr.NewGitHubPRProvider(ghClient)
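++ // rawManifest watches the files in the local checkout and recognizes
++ // objects in them using the scheme's serializer.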
++ rawManifest, err := unstructuredevent.NewManifest(
++ localClone.Dir(),
++ filesystem.DefaultContentTyper,
++ core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced
++ &core.SerializerObjectRecognizer{Serializer: scheme.Serializer},
++ filesystem.DefaultPathExcluders(),
++ )
+ if err != nil {
+ return err
+ }
+- // Create a new GitStorage using the GitDirectory, PR provider, and Serializer
+- gitStorage, err := transaction.NewGitStorage(gitDir, prProvider, scheme.Serializer)
++
++ // Create the channel to receive object events on, and register it with rawManifest
++ updates := make(event.ObjectEventStream, 4096)
++ if err := rawManifest.WatchForObjectEvents(ctx, updates); err != nil {
++ return err
++ }
++
++ defer func() { _ = rawManifest.Close() }()
++
++ b, err := backend.NewGeneric(rawManifest, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil)
+ if err != nil {
+ return err
+ }
+
+- // Set the log level
+- logs.Logger.SetLevel(logrus.InfoLevel)
++ gitClient, err := client.NewGeneric(b, scheme.Serializer.Patcher())
++ if err != nil {
++ return err
++ }
+
+- watchStorage, err := watch.NewManifestStorage(gitDir.Dir(), scheme.Serializer)
++ txGeneralClient, err := transactional.NewGeneric(gitClient, localClone, nil)
+ if err != nil {
+ return err
+ }
+- defer func() { _ = watchStorage.Close() }()
+
+- updates := make(chan update.Update, 4096)
+- watchStorage.SetUpdateStream(updates)
++ txClient, err := distributed.NewClient(txGeneralClient, localClone)
++ if err != nil {
++ return err
++ }
++
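++ // Client layering in this sample: gitClient (typed CRUD over the local
++ // checkout) -> transactional client (commit/branch transactions) ->
++ // distributed client (branch pushing and periodic resync with origin).
++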
++ // Create a new CommitHook for sending PRs
++ prCommitHook, err := githubpr.NewGitHubPRCommitHandler(ghClient, localClone.RepositoryRef())
++ if err != nil {
++ return err
++ }
++
++ // Register the PR CommitHook with the BranchManager.
++ // This needs to happen after the distributed.NewClient call, so that
++ // the distributed client gets to handle pushing the branch first.
++ localClone.CommitHookChain().Register(prCommitHook)
++
++ // Start the sync loop in the background
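++ // (replacing the old GitDirectoryOptions.Interval-based polling)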
++ txClient.StartResyncLoop(ctx, 15*time.Second)
+
+ go func() {
+ for upd := range updates {
+- logrus.Infof("Got %s update for: %v %v", upd.Event, upd.PartialObject.GetObjectKind().GroupVersionKind(), upd.PartialObject.GetObjectMeta())
++ logrus.Infof("Got %s update for: %v %v", upd.Type, upd.ID.GroupKind(), upd.ID.ObjectKey())
+ }
+ }()
+
+ e := common.NewEcho()
+
+ e.GET("/git/", func(c echo.Context) error {
+- objs, err := gitStorage.List(storage.NewKindKey(common.CarGVK))
+- if err != nil {
++ list := &unstructured.UnstructuredList{}
++ list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList"))
++
++ /*if br := c.QueryParam("branch"); len(br) != 0 {
++ ctx = core.WithVersionRef(ctx, core.NewBranchRef(br))
++ }*/
++
++ if err := txClient.List(ctx, list); err != nil {
++ return err
++ }
++ var content bytes.Buffer
++ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil {
+ return err
+ }
+- return c.JSON(http.StatusOK, objs)
++ return c.JSONBlob(http.StatusOK, content.Bytes())
+ })
+
+ e.PUT("/git/:name", func(c echo.Context) error {
+@@ -161,26 +228,36 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error {
+ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
+ }
+
+- objKey := common.CarKeyForName(name)
+- err := gitStorage.Transaction(context.Background(), fmt.Sprintf("%s-update-", name), func(ctx context.Context, s storage.Storage) (transaction.CommitResult, error) {
++ car := v1alpha1.Car{}
++ carKey := core.ObjectKey{Name: name}
+
+- // Update the status of the car
+- if err := common.SetNewCarStatus(s, objKey); err != nil {
+- return nil, err
+- }
++ branchCtx := core.WithVersionRef(ctx, core.NewBranchRef(localClone.MainBranch()))
+
+- return &transaction.GenericPullRequestResult{
+- CommitResult: &transaction.GenericCommitResult{
+- AuthorName: authorName,
+- AuthorEmail: authorEmail,
+- Title: "Update Car speed",
+- Description: "We really need to sync this state!",
++ headBranch := fmt.Sprintf("%s-update-", name)
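++ // The fluent chain below reads the Car on headBranch, mutates its
++ // status, writes it back, and finally commits and opens a GitHub PR
++ // via CreateTx.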
++ err := txClient.
++ BranchTransaction(branchCtx, headBranch).
++ Get(carKey, &car).
++ Custom(func(ctx context.Context) error {
++ car.Status.Distance = rand.Uint64()
++ car.Status.Speed = rand.Float64() * 100
++ return nil
++ }).
++ Update(&car).
++ CreateTx(githubpr.GenericPullRequest{
++ Commit: transactional.GenericCommit{
++ Author: transactional.GenericCommitAuthor{
++ Name: authorName,
++ Email: authorEmail,
++ },
++ Message: transactional.GenericCommitMessage{
++ Title: "Update Car speed",
++ Description: "We really need to sync this state!",
++ },
+ },
+- Labels: []string{"user/bot", "actuator/libgitops", "kind/status-update"},
+- Assignees: *prAssigneeFlag,
+- Milestone: *prMilestoneFlag,
+- }, nil
+- })
++ Labels: prLabels,
++ Assignees: prAssignees,
++ Milestone: prMilestone,
++ }).Error()
+ if err != nil {
+ return err
+ }
+diff --git a/cmd/sample-watch/main.go b/cmd/sample-watch/main.go
+index ef1aec0..c81a279 100644
+--- a/cmd/sample-watch/main.go
++++ b/cmd/sample-watch/main.go
+@@ -2,6 +2,7 @@ package main
+
+ import (
+ "bytes"
++ "context"
+ "fmt"
+ "net/http"
+ "os"
+@@ -10,11 +11,17 @@ import (
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/pflag"
+ "github.com/weaveworks/libgitops/cmd/common"
++ "github.com/weaveworks/libgitops/cmd/common/logs"
+ "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme"
+- "github.com/weaveworks/libgitops/pkg/logs"
++ "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1"
+ "github.com/weaveworks/libgitops/pkg/serializer"
+- "github.com/weaveworks/libgitops/pkg/storage/watch"
+- "github.com/weaveworks/libgitops/pkg/storage/watch/update"
++ "github.com/weaveworks/libgitops/pkg/storage/backend"
++ "github.com/weaveworks/libgitops/pkg/storage/client"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "github.com/weaveworks/libgitops/pkg/storage/event"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
++ unstructuredevent "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/event"
++ "github.com/weaveworks/libgitops/pkg/storage/kube"
+ )
+
+ var watchDirFlag = pflag.String("watch-dir", "/tmp/libgitops/watch", "Where to watch for YAML/JSON manifests")
+@@ -24,33 +31,55 @@ func main() {
+ common.ParseVersionFlag()
+
+ // Run the application
+- if err := run(); err != nil {
++ if err := run(*watchDirFlag); err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ }
+
+-func run() error {
++func run(watchDir string) error {
+ // Create the watch directory
+- if err := os.MkdirAll(*watchDirFlag, 0755); err != nil {
++ if err := os.MkdirAll(watchDir, 0755); err != nil {
+ return err
+ }
+
+ // Set the log level
+- logs.Logger.SetLevel(logrus.InfoLevel)
++ logs.Logger.SetLevel(logrus.TraceLevel)
+
+- watchStorage, err := watch.NewManifestStorage(*watchDirFlag, scheme.Serializer)
++ ctx := context.Background()
++
++ rawManifest, err := unstructuredevent.NewManifest(
++ watchDir,
++ filesystem.DefaultContentTyper,
++ core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced
++ &core.SerializerObjectRecognizer{Serializer: scheme.Serializer},
++ filesystem.DefaultPathExcluders(),
++ )
++ if err != nil {
++ return err
++ }
++
++ // Create the channel to receive object events on, and register it with rawManifest
++ updates := make(event.ObjectEventStream, 4096)
++ if err := rawManifest.WatchForObjectEvents(ctx, updates); err != nil {
++ return err
++ }
++
++ b, err := backend.NewGeneric(rawManifest, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil)
++ if err != nil {
++ return err
++ }
++
++ watchStorage, err := client.NewGeneric(b, scheme.Serializer.Patcher())
+ if err != nil {
+ return err
+ }
+- defer func() { _ = watchStorage.Close() }()
+
+- updates := make(chan update.Update, 4096)
+- watchStorage.SetUpdateStream(updates)
++ defer func() { _ = rawManifest.Close() }()
+
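++ // watchStorage is a read/write client over the watched directory;
++ // manifest edits on disk surface as events on the updates channel.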
+ go func() {
+ for upd := range updates {
+- logrus.Infof("Got %s update for: %v %v", upd.Event, upd.PartialObject.GetObjectKind().GroupVersionKind(), upd.PartialObject.GetObjectMeta())
++ logrus.Infof("Got %s update for: %v %v", upd.Type, upd.ID.GroupKind(), upd.ID.ObjectKey())
+ }
+ }()
+
+@@ -62,7 +91,8 @@ func run() error {
+ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
+ }
+
+- obj, err := watchStorage.Get(common.CarKeyForName(name))
++ obj := &v1alpha1.Car{}
++ err := watchStorage.Get(ctx, core.ObjectKey{Name: name}, obj)
+ if err != nil {
+ return err
+ }
+@@ -79,7 +109,7 @@ func run() error {
+ return echo.NewHTTPError(http.StatusBadRequest, "Please set name")
+ }
+
+- if err := common.SetNewCarStatus(watchStorage, common.CarKeyForName(name)); err != nil {
++ if err := common.SetNewCarStatus(ctx, watchStorage, name); err != nil {
+ return err
+ }
+ return c.String(200, "OK!")
+diff --git a/go.mod b/go.mod
+index c03013f..499f482 100644
+--- a/go.mod
++++ b/go.mod
+@@ -1,31 +1,30 @@
+ module github.com/weaveworks/libgitops
+
+-go 1.14
++go 1.15
+
+-replace (
+- github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
+- github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.3.0
+-)
++replace github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
+
+ require (
+- github.com/fluxcd/go-git-providers v0.0.2
+- github.com/fluxcd/toolkit v0.0.1-beta.2
+- github.com/go-git/go-git/v5 v5.1.0
+- github.com/go-openapi/spec v0.19.8
++ github.com/evanphx/json-patch v4.9.0+incompatible
++ github.com/fluxcd/go-git-providers v0.0.3
++ github.com/fluxcd/pkg/ssh v0.0.5
++ github.com/go-git/go-git/v5 v5.2.0
++ github.com/go-openapi/spec v0.20.0
+ github.com/google/go-github/v32 v32.1.0
+ github.com/labstack/echo v3.3.10+incompatible
+ github.com/labstack/gommon v0.3.0 // indirect
+ github.com/mattn/go-isatty v0.0.12 // indirect
+ github.com/mitchellh/go-homedir v1.1.0
+ github.com/rjeczalik/notify v0.9.2
+- github.com/sirupsen/logrus v1.6.0
++ github.com/sirupsen/logrus v1.7.0
++ github.com/spf13/afero v1.2.2
+ github.com/spf13/pflag v1.0.5
+ github.com/stretchr/testify v1.6.1
+- golang.org/x/net v0.0.0-20200625001655-4c5254603344 // indirect
+- golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d
+- k8s.io/apimachinery v0.18.6
+- k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
+- sigs.k8s.io/controller-runtime v0.6.0
+- sigs.k8s.io/kustomize/kyaml v0.1.11
+- sigs.k8s.io/yaml v1.2.0
++ golang.org/x/sys v0.0.0-20210108172913-0df2131ae363
++ k8s.io/api v0.19.2
++ k8s.io/apimachinery v0.19.6
++ k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6
++ k8s.io/utils v0.0.0-20200912215256-4140de9c8800
++ sigs.k8s.io/controller-runtime v0.7.0
++ sigs.k8s.io/kustomize/kyaml v0.10.5
+ )
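++
++ // Note: controller-runtime v0.7.x is the release line built against the
++ // k8s.io v0.19 module set pinned above.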
+diff --git a/go.sum b/go.sum
+index c1ecf37..b401269 100644
+--- a/go.sum
++++ b/go.sum
+@@ -1,28 +1,34 @@
+-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+ cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
++cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
++cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
++cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
++cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
++cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
++cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
++cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
++cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
++cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
++dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+ github.com/360EntSecGroup-Skylar/excelize v1.4.1/go.mod h1:vnax29X2usfl7HHkBrX5EvSCJcmH3dT9luvxzu8iGAE=
+ github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+ github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
++github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
+ github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
++github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
+ github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
++github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
+ github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
+ github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
++github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
+ github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
+ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
++github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+-github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
+-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+-github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+-github.com/Masterminds/sprig/v3 v3.0.2/go.mod h1:oesJ8kPONMONaZgtiHNzUShJbksypC5kWczhZAf6+aU=
+-github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
+-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+-github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
+ github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
+ github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs=
+ github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+@@ -33,12 +39,13 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2 h1:JCHLVE3B+kJd
+ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
+ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+ github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
+ github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
+ github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
+ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
++github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
++github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+ github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
+@@ -48,72 +55,46 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd
+ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+-github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+-github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
++github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
++github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+ github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
+-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+-github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=
+ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
+ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+-github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
++github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
++github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+-github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
+-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+ github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+ github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+ github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+ github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+-github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
+ github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+-github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+-github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
+-github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As=
+ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+ github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+-github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+-github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+-github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+-github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+ github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
++github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+ github.com/dustmop/soup v1.1.2-0.20190516214245-38228baa104e/go.mod h1:CgNC6SGbT+Xb8wGGvzilttZL1mc5sQ/5KkcxsZttMIk=
+-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw=
+ github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+@@ -123,18 +104,15 @@ github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg
+ github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+-github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+ github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
+ github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+-github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
+-github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
++github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
++github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+-github.com/fluxcd/go-git-providers v0.0.2 h1:NGJeJl1TOJKbxtQkRL9JOk5lIopR1XNi6hGgZC5+8IE=
+-github.com/fluxcd/go-git-providers v0.0.2/go.mod h1:2Fp9GDxIcllNR7pm5clXhInPyue4VggecaH83KhkpNw=
+-github.com/fluxcd/kustomize-controller v0.0.1-beta.2/go.mod h1:mLeipvpQkyof6b5IHNtqeA8CmbjfVIf92UyKkpeBY98=
+-github.com/fluxcd/source-controller v0.0.1-beta.2/go.mod h1:tmscNdCxEt7+Xt2g1+bI38hMPw2leYMFAaCn4UlMGuw=
+-github.com/fluxcd/toolkit v0.0.1-beta.2 h1:JG80AUIGd936QJ6Vs/xZweoKcE6j7Loua5Wn6Q/pVh8=
+-github.com/fluxcd/toolkit v0.0.1-beta.2/go.mod h1:NqDXj2aeVMbVkrCHeP/r0um+edXXyeGlG/9pKZLqGdM=
++github.com/fluxcd/go-git-providers v0.0.3 h1:pquQvTpd1a4V1efPyZWuVPeIKrTgV8QRoDY0VGH+qiw=
++github.com/fluxcd/go-git-providers v0.0.3/go.mod h1:iaXf3nEq8MB/LzxfbNcCl48sAtIReUU7jqjJ7CEnfFQ=
++github.com/fluxcd/pkg/ssh v0.0.5 h1:rnbFZ7voy2JBlUfMbfyqArX2FYaLNpDhccGFC3qW83A=
++github.com/fluxcd/pkg/ssh v0.0.5/go.mod h1:7jXPdXZpc0ttMNz2kD9QuMi3RNn/e0DOFbj0Tij/+Hs=
+ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
+ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+ github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+@@ -147,27 +125,28 @@ github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
+ github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+ github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+ github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+-github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA=
+ github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
+ github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+ github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
+ github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
+ github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM=
+ github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
+-github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp/pqnefH+Bc=
+-github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
+-github.com/go-git/go-git/v5 v5.0.0/go.mod h1:oYD8y9kWsGINPFJoLdaScGCN6dlKg23blmClfZwtUVA=
+-github.com/go-git/go-git/v5 v5.1.0 h1:HxJn9g/E7eYvKW3Fm7Jt4ee8LXfPOm/H1cdDu8vEssk=
+-github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM=
++github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M=
++github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
++github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI=
++github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs=
++github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+-github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
++github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+ github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
+ github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+-github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54=
+-github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
+-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
++github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
++github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs=
++github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
++github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4=
++github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU=
+ github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
+ github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+ github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
+@@ -184,6 +163,8 @@ github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9
+ github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+ github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
+ github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
++github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
++github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9 h1:tF+augKRWlWx0J0B7ZyyKSiTyV6E1zZe+7b3qQlcEf8=
+ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+ github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+@@ -192,6 +173,8 @@ github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq
+ github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+ github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
+ github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
++github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
++github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+ github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+ github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+ github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
+@@ -209,8 +192,8 @@ github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wab
+ github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+ github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw=
+ github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+-github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg=
+-github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
++github.com/go-openapi/spec v0.20.0 h1:HGLc8AJ7ynOxwv0Lq4TsnwLsWMawHAYiJIFzbcML86I=
++github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+ github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+ github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
+ github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
+@@ -224,71 +207,42 @@ github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88d
+ github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+ github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
+ github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
++github.com/go-openapi/swag v0.19.12 h1:Bc0bnY2c3AoF7Gc+IMIAQQsD8fLHjHpc19wXvYuayQI=
++github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M=
+ github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
+ github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
+ github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+ github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+-github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
+-github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
+-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
+-github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
+-github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg=
+-github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
+-github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
+-github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk=
+-github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
+-github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
+-github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
+-github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
+-github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+-github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
+-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+-github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+-github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
++github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
+ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+ github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
+ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+-github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+ github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
++github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
++github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
++github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+ github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+-github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
++github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+ github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+ github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+ github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+ github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
+ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
++github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+ github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
++github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+ github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
+-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
+-github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
+-github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
+-github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o=
+-github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
+-github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
+-github.com/golangci/golangci-lint v1.21.0/go.mod h1:phxpHK52q7SE+5KpPnti4oZTdFCEsn/tKN+nFvCKXfk=
+-github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
+-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
+-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
+-github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
+-github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
+-github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
+-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
+-github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
+-github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8=
+-github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
+ github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+@@ -297,32 +251,31 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
+ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
+ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+-github.com/google/go-github/v32 v32.0.0 h1:q74KVb22spUq0U5HqZ9VCYqQz8YRuOtL/39ZnfwO+NM=
+-github.com/google/go-github/v32 v32.0.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI=
++github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
++github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+ github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II=
+ github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI=
+ github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+-github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
+ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
++github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
++github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+ github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+ github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+-github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0=
+-github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+-github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
++github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
++github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
++github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM=
++github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+-github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+-github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
+ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
+ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+@@ -331,38 +284,36 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
+ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+ github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
+-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+ github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
+-github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
+-github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
++github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
++github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+ github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+-github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
++github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+-github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+-github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+ github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
+ github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
++github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc=
++github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+ github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+ github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+ github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+-github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
++github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
++github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+ github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+ github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+-github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
+-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
++github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
++github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=
++github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+ github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
+ github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
+@@ -371,10 +322,6 @@ github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT
+ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+-github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+-github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
+-github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+-github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
+ github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+ github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
+@@ -382,9 +329,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
+ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+ github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
++github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+ github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+@@ -394,11 +341,6 @@ github.com/labstack/echo v3.3.10+incompatible h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8
+ github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s=
+ github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
+ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
+-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+-github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
+-github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
+-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
+-github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
+ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a h1:TpvdAwDAt1K4ANVOfcihouRdvP+MgAfDWwBuct4l6ZY=
+ github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+@@ -409,13 +351,12 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN
+ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+ github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
+ github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+-github.com/manifoldco/promptui v0.7.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ=
+-github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
++github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
++github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
++github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
+ github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+ github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
+ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+-github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
+-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+ github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
+@@ -425,35 +366,27 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y
+ github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
+ github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+-github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
+-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
+ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
++github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
++github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+ github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+ github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+-github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
+-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+-github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+ github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+ github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
++github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+-github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
+-github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+-github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
++github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
+ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+-github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
+ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+ github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
+@@ -463,44 +396,37 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v
+ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+ github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
+ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+ github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+ github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
+ github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
++github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
++github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+ github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+-github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34=
+-github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+ github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
+ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
++github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
++github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+ github.com/paulmach/orb v0.1.3/go.mod h1:VFlX/8C+IQ1p6FTRRKzKoOPJnvEtA5G0Veuqwbu//Vk=
+ github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+ github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+ github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+-github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
+ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+ github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+ github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+-github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+ github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+ github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
++github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
++github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+@@ -509,155 +435,150 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T
+ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
++github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
++github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
++github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
++github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+ github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+ github.com/qri-io/starlib v0.4.2-0.20200213133954-ff2e8cd5ef8d/go.mod h1:7DPO4domFU579Ga6E61sB9VFNaniPVwJP5C4bBCu3wA=
+-github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
+-github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
+ github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8=
+ github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM=
+ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+-github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do=
+ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+ github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
+ github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+-github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc=
+-github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
+-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
+-github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
+ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+ github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+ github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
+ github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
++github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
++github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+-github.com/sosedoff/gitkit v0.2.1-0.20191202022816-7182d43c6254/go.mod h1:A+o6ZazfVJwetlcHz3ah6th66XcBdsyzLo+aBt/AsK4=
+-github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
+ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
++github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
+ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+ github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+ github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
+ github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+ github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+ github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
+ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
++github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
+ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+ github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
+ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+-github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+ github.com/stretchr/testify v1.2.3-0.20181224173747-660f15d67dbb/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
+ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
++github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+ github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
+-github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
+ github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+ github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
+-github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
+-github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
+-github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
+-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+-github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
+ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
+ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
+-github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
+ github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=
+ github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
+-github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
+-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
+ github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
+ github.com/xanzy/go-gitlab v0.33.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
+ github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
+ github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
+-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+-github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+-github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
+ github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
+ github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+-github.com/yujunz/go-getter v1.4.1-lite/go.mod h1:sbmqxXjyLunH1PkF3n7zSlnVeMvmYUuIl9ZVs/7NyCc=
++github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+ go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
++go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
++go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8=
+ go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+ go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+ go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+ go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
++go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+ go.starlark.net v0.0.0-20190528202925-30ae18b8564f/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg=
+ go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
+ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+ go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
+ go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
++go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
++go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
++go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
++go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
+ go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
+ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
++go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
++go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
++go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
++go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
++go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+ go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
+ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
++go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
++go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
+ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+-golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+ golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+ golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
++golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
+ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+ golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
+-golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+-golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
++golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
++golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM=
+ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+ golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+-golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+-golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
++golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
++golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
++golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
++golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+ golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
++golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
++golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
++golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
++golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
++golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
++golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
++golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+ golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+-golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
++golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
++golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
++golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
++golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
++golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+ golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+ golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+ golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+-golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+ golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+ golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+ golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+@@ -669,26 +590,32 @@ golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn
+ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
++golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
++golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
+ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+ golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
++golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+ golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
+ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
++golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+-golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
+-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
++golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
++golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
++golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+ golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+ golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+ golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
+ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
++golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
++golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+@@ -696,7 +623,6 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
+ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+-golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+@@ -704,9 +630,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
+ golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+-golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+@@ -716,88 +639,116 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w
+ golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
+ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
+ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+-golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d h1:QQrM/CCYEzTs91GZylDCQjGHudbPTxF/1fvXdVh5lMo=
+-golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+-golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
++golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
++golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
++golang.org/x/sys v0.0.0-20210108172913-0df2131ae363 h1:wHn06sgWHMO1VsQ8F+KzDJx/JzqfsNLnc+oEi07qD7s=
++golang.org/x/sys v0.0.0-20210108172913-0df2131ae363/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
++golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
++golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
++golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
++golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
+ golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
++golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
+ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
++golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
++golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+ golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+ golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+ golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+-golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+-golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+ golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+-golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+ golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+ golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+-golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+-golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
++golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
++golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
++golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88=
+ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+-golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+-golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+-golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+-golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+-golang.org/x/tools v0.0.0-20191010075000-0337d82405ff h1:XdBG6es/oFDr1HwaxkxgVve7NB281QhxgK/i4voubFs=
+-golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
++golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
++golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
++golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
++golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
++golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
++golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
++golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
++golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
++golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
++golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
++golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
++golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8=
++golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
++golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+-gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0=
+-gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
+-gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
+-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+-gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
++gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k=
++gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
+ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
++google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
++google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
++google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
++google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+ google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+ google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+ google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
+ google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
++google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
++google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
++google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
++google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+ google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+ google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
++google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
++google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
++google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
++google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
++google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+ google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+@@ -805,9 +756,12 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ
+ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
++google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+ google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
++google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
++google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
++google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
+ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+@@ -820,7 +774,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
+ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
+ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
+ gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+ gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+@@ -835,93 +788,58 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+ gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
++gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+ gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+ gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
+ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+ gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
+ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+-gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2 h1:XZx7nhd5GMaZpmDaEHFVafUZC7ya0fuo7cSJ3UCKYmM=
+-gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
++gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
++gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
+ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
++gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
++gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+ gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+-helm.sh/helm/v3 v3.1.2/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g=
++gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
++honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
++honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
+ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
+-k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
+-k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4=
+-k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
+-k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
+-k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs=
+-k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8=
+-k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
+-k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
+-k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
+-k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA=
+-k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
+-k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag=
+-k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
+-k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo=
+-k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
+-k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI=
+-k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
+-k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI=
+-k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE=
+-k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
+-k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
+-k8s.io/code-generator v0.18.2 h1:C1Nn2JiMf244CvBDKVPX0W2mZFJkVBg54T8OV7/Imso=
+-k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
+-k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs=
+-k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y=
+-k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
+-k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+-k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+-k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
+-k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+-k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+-k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+-k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
+-k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+-k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
+-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
+-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+-k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
+-k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
+-k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk=
+-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+-k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw=
+-k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+-k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
+-k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
+-modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
+-modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
+-modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
+-modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
+-modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
+-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
+-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
+-mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw=
+-rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY=
+-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
+-sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8=
+-sigs.k8s.io/controller-runtime v0.6.0 h1:Fzna3DY7c4BIP6KwfSlrfnj20DJ+SeMBK8HSFvOk9NM=
+-sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo=
+-sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
+-sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
+-sigs.k8s.io/kustomize/api v0.4.1/go.mod h1:NqxqT+wbYHrD0P19Uu4dXiMsVwI1IwQs+MJHlLhmPqQ=
+-sigs.k8s.io/kustomize/kyaml v0.1.11 h1:/VvWxVIgH5gG1K4A7trgbyLgO3tRBiAWNhLFVU1HEmo=
+-sigs.k8s.io/kustomize/kyaml v0.1.11/go.mod h1:72/rLkSi+L/pHM1oCjwrf3ClU+tH5kZQvvdLSqIHwWU=
+-sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+-sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU=
+-sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
+-sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
+-sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
+-sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
++k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms=
++k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI=
++k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA=
++k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg=
++k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
++k8s.io/apimachinery v0.19.6 h1:kBLzSGuDdY1NdSV2uFzI+FwZ9wtkmG+X3ZVcWXSqNgA=
++k8s.io/apimachinery v0.19.6/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q=
++k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA=
++k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc=
++k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA=
++k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk=
++k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs=
++k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo=
++k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
++k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
++k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
++k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
++k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
++k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ=
++k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
++k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
++k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g=
++k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
++rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
++sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0=
++sigs.k8s.io/controller-runtime v0.7.0 h1:bU20IBBEPccWz5+zXpLnpVsgBYxqclaHu1pVDl/gEt8=
++sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU=
++sigs.k8s.io/kustomize/kyaml v0.10.5 h1:PbJcsZsEM7O3hHtUWTR+4WkHVbQRW9crSy75or1gRbI=
++sigs.k8s.io/kustomize/kyaml v0.10.5/go.mod h1:P6Oy/ah/GZMKzJMIJA2a3/bc8YrBkuL5kJji13PSIzY=
++sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
++sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+ sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
+ sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+ sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
+ sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
+-sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
+-vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
+diff --git a/hack/generate-client.sh b/hack/generate-client.sh
+deleted file mode 100755
+index b7e5853..0000000
+--- a/hack/generate-client.sh
++++ /dev/null
+@@ -1,16 +0,0 @@
+-#!/bin/bash
+-
+-SCRIPT_DIR=$( dirname "${BASH_SOURCE[0]}" )
+-cd ${SCRIPT_DIR}/..
+-
+-RESOURCES="Car Motorcycle"
+-CLIENT_NAME=SampleInternal
+-OUT_DIR=cmd/sample-app/client
+-API_DIR="github.com/weaveworks/libgitops/cmd/sample-app/apis/sample"
+-mkdir -p ${OUT_DIR}
+-for Resource in ${RESOURCES}; do
+- resource=$(echo "${Resource}" | awk '{print tolower($0)}')
+- sed -e "s|Resource|${Resource}|g;s|resource|${resource}|g;/build ignore/d;s|API_DIR|${API_DIR}|g;s|*Client|*${CLIENT_NAME}Client|g" \
+- pkg/client/client_resource_template.go > \
+- ${OUT_DIR}/zz_generated.client_${resource}.go
+-done
+diff --git a/pkg/client/client_dynamic.go b/pkg/client/client_dynamic.go
+deleted file mode 100644
+index 5f3ac2a..0000000
+--- a/pkg/client/client_dynamic.go
++++ /dev/null
+@@ -1,97 +0,0 @@
+-// +build ignore
+-
+-package client
+-
+-import (
+- "fmt"
+-
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/storage"
+- "github.com/weaveworks/libgitops/pkg/storage/filterer"
+- "k8s.io/apimachinery/pkg/runtime/schema"
+-)
+-
+-// DynamicClient is an interface for accessing API types generically
+-type DynamicClient interface {
+- // New returns a new Object of its kind
+- New() runtime.Object
+- // Get returns an Object matching the UID from the storage
+- Get(runtime.UID) (runtime.Object, error)
+- // Set saves an Object into the persistent storage
+- Set(runtime.Object) error
+- // Patch performs a strategic merge patch on the object with
+- // the given UID, using the byte-encoded patch given
+- Patch(runtime.UID, []byte) error
+- // Find returns an Object based on the given filter, filters can
+- // match e.g. the Object's Name, UID or a specific property
+- Find(filter filterer.BaseFilter) (runtime.Object, error)
+- // FindAll returns multiple Objects based on the given filter, filters can
+- // match e.g. the Object's Name, UID or a specific property
+- FindAll(filter filterer.BaseFilter) ([]runtime.Object, error)
+- // Delete deletes an Object from the storage
+- Delete(uid runtime.UID) error
+- // List returns a list of all Objects available
+- List() ([]runtime.Object, error)
+-}
+-
+-// dynamicClient is a struct implementing the DynamicClient interface
+-// It uses a shared storage instance passed from the Client together with its own Filterer
+-type dynamicClient struct {
+- storage storage.Storage
+- gvk schema.GroupVersionKind
+- filterer *filterer.Filterer
+-}
+-
+-// NewDynamicClient builds the dynamicClient struct using the storage implementation and a new Filterer
+-func NewDynamicClient(s storage.Storage, gvk schema.GroupVersionKind) DynamicClient {
+- return &dynamicClient{
+- storage: s,
+- gvk: gvk,
+- filterer: filterer.NewFilterer(s),
+- }
+-}
+-
+-// New returns a new Object of its kind
+-func (c *dynamicClient) New() runtime.Object {
+- obj, err := c.storage.New(c.gvk)
+- if err != nil {
+- panic(fmt.Sprintf("Client.New must not return an error: %v", err))
+- }
+- return obj
+-}
+-
+-// Get returns an Object based the given UID
+-func (c *dynamicClient) Get(uid runtime.UID) (runtime.Object, error) {
+- return c.storage.Get(c.gvk, uid)
+-}
+-
+-// Set saves an Object into the persistent storage
+-func (c *dynamicClient) Set(resource runtime.Object) error {
+- return c.storage.Set(c.gvk, resource)
+-}
+-
+-// Patch performs a strategic merge patch on the object with
+-// the given UID, using the byte-encoded patch given
+-func (c *dynamicClient) Patch(uid runtime.UID, patch []byte) error {
+- return c.storage.Patch(c.gvk, uid, patch)
+-}
+-
+-// Find returns an Object based on a given Filter
+-func (c *dynamicClient) Find(filter filterer.BaseFilter) (runtime.Object, error) {
+- return c.filterer.Find(c.gvk, filter)
+-}
+-
+-// FindAll returns multiple Objects based on a given Filter
+-func (c *dynamicClient) FindAll(filter filterer.BaseFilter) ([]runtime.Object, error) {
+- return c.filterer.FindAll(c.gvk, filter)
+-}
+-
+-// Delete deletes the Object from the storage
+-func (c *dynamicClient) Delete(uid runtime.UID) error {
+- return c.storage.Delete(c.gvk, uid)
+-}
+-
+-// List returns a list of all Objects available
+-func (c *dynamicClient) List() ([]runtime.Object, error) {
+- return c.storage.List(c.gvk)
+-}
+diff --git a/pkg/client/client_resource_template.go b/pkg/client/client_resource_template.go
+deleted file mode 100644
+index 53bc874..0000000
+--- a/pkg/client/client_resource_template.go
++++ /dev/null
+@@ -1,152 +0,0 @@
+-// +build ignore
+-
+-/*
+- Note: This file is autogenerated! Do not edit it manually!
+- Edit client_resource_template.go instead, and run
+- hack/generate-client.sh afterwards.
+-*/
+-
+-package client
+-
+-import (
+- "fmt"
+-
+- api "API_DIR"
+-
+- log "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/storage"
+- "github.com/weaveworks/libgitops/pkg/storage/filterer"
+- "k8s.io/apimachinery/pkg/runtime/schema"
+-)
+-
+-// ResourceClient is an interface for accessing Resource-specific API objects
+-type ResourceClient interface {
+- // New returns a new Resource
+- New() *api.Resource
+- // Get returns the Resource matching given UID from the storage
+- Get(runtime.UID) (*api.Resource, error)
+- // Set saves the given Resource into persistent storage
+- Set(*api.Resource) error
+- // Patch performs a strategic merge patch on the object with
+- // the given UID, using the byte-encoded patch given
+- Patch(runtime.UID, []byte) error
+- // Find returns the Resource matching the given filter, filters can
+- // match e.g. the Object's Name, UID or a specific property
+- Find(filter filterer.BaseFilter) (*api.Resource, error)
+- // FindAll returns multiple Resources matching the given filter, filters can
+- // match e.g. the Object's Name, UID or a specific property
+- FindAll(filter filterer.BaseFilter) ([]*api.Resource, error)
+- // Delete deletes the Resource with the given UID from the storage
+- Delete(uid runtime.UID) error
+- // List returns a list of all Resources available
+- List() ([]*api.Resource, error)
+-}
+-
+-// Resources returns the ResourceClient for the Client object
+-func (c *Client) Resources() ResourceClient {
+- if c.resourceClient == nil {
+- c.resourceClient = newResourceClient(c.storage, c.gv)
+- }
+-
+- return c.resourceClient
+-}
+-
+-// resourceClient is a struct implementing the ResourceClient interface
+-// It uses a shared storage instance passed from the Client together with its own Filterer
+-type resourceClient struct {
+- storage storage.Storage
+- filterer *filterer.Filterer
+- gvk schema.GroupVersionKind
+-}
+-
+-// newResourceClient builds the resourceClient struct using the storage implementation and a new Filterer
+-func newResourceClient(s storage.Storage, gv schema.GroupVersion) ResourceClient {
+- return &resourceClient{
+- storage: s,
+- filterer: filterer.NewFilterer(s),
+- gvk: gv.WithKind(api.KindResource.Title()),
+- }
+-}
+-
+-// New returns a new Object of its kind
+-func (c *resourceClient) New() *api.Resource {
+- log.Tracef("Client.New; GVK: %v", c.gvk)
+- obj, err := c.storage.New(c.gvk)
+- if err != nil {
+- panic(fmt.Sprintf("Client.New must not return an error: %v", err))
+- }
+- return obj.(*api.Resource)
+-}
+-
+-// Find returns a single Resource based on the given Filter
+-func (c *resourceClient) Find(filter filterer.BaseFilter) (*api.Resource, error) {
+- log.Tracef("Client.Find; GVK: %v", c.gvk)
+- object, err := c.filterer.Find(c.gvk, filter)
+- if err != nil {
+- return nil, err
+- }
+-
+- return object.(*api.Resource), nil
+-}
+-
+-// FindAll returns multiple Resources based on the given Filter
+-func (c *resourceClient) FindAll(filter filterer.BaseFilter) ([]*api.Resource, error) {
+- log.Tracef("Client.FindAll; GVK: %v", c.gvk)
+- matches, err := c.filterer.FindAll(c.gvk, filter)
+- if err != nil {
+- return nil, err
+- }
+-
+- results := make([]*api.Resource, 0, len(matches))
+- for _, item := range matches {
+- results = append(results, item.(*api.Resource))
+- }
+-
+- return results, nil
+-}
+-
+-// Get returns the Resource matching given UID from the storage
+-func (c *resourceClient) Get(uid runtime.UID) (*api.Resource, error) {
+- log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk)
+- object, err := c.storage.Get(c.gvk, uid)
+- if err != nil {
+- return nil, err
+- }
+-
+- return object.(*api.Resource), nil
+-}
+-
+-// Set saves the given Resource into the persistent storage
+-func (c *resourceClient) Set(resource *api.Resource) error {
+- log.Tracef("Client.Set; UID: %q, GVK: %v", resource.GetUID(), c.gvk)
+- return c.storage.Set(c.gvk, resource)
+-}
+-
+-// Patch performs a strategic merge patch on the object with
+-// the given UID, using the byte-encoded patch given
+-func (c *resourceClient) Patch(uid runtime.UID, patch []byte) error {
+- return c.storage.Patch(c.gvk, uid, patch)
+-}
+-
+-// Delete deletes the Resource from the storage
+-func (c *resourceClient) Delete(uid runtime.UID) error {
+- log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk)
+- return c.storage.Delete(c.gvk, uid)
+-}
+-
+-// List returns a list of all Resources available
+-func (c *resourceClient) List() ([]*api.Resource, error) {
+- log.Tracef("Client.List; GVK: %v", c.gvk)
+- list, err := c.storage.List(c.gvk)
+- if err != nil {
+- return nil, err
+- }
+-
+- results := make([]*api.Resource, 0, len(list))
+- for _, item := range list {
+- results = append(results, item.(*api.Resource))
+- }
+-
+- return results, nil
+-}
+diff --git a/pkg/filter/interfaces.go b/pkg/filter/interfaces.go
+index 62d3cd3..a097112 100644
+--- a/pkg/filter/interfaces.go
++++ b/pkg/filter/interfaces.go
+@@ -1,48 +1,20 @@
+ package filter
+
+-import "github.com/weaveworks/libgitops/pkg/runtime"
++import (
++ "errors"
+
+-// ListFilter is an interface for pipe-like list filtering behavior.
+-type ListFilter interface {
+- // Filter walks through all objects in obj, assesses whether the object
+- // matches the filter parameters, and conditionally adds it to the return
+- // slice or not. This method can be thought of like an UNIX pipe.
+- Filter(objs ...runtime.Object) ([]runtime.Object, error)
+-}
++ "sigs.k8s.io/controller-runtime/pkg/client"
++)
++
++var (
++ // ErrInvalidFilterParams describes an error where invalid parameters were given
++ // to a filter.
++ ErrInvalidFilterParams = errors.New("invalid parameters given to filter")
++)
+
+ // ObjectFilter is an interface for filtering objects one-by-one.
+ type ObjectFilter interface {
+- // Filter takes in one object (at once, per invocation), and returns a
++	// Match takes in one object per invocation, and returns a
+ // boolean whether the object matches the filter parameters, or not.
+- Filter(obj runtime.Object) (bool, error)
+-}
+-
+-// ObjectToListFilter transforms an ObjectFilter into a ListFilter. If of is nil,
+-// this function panics.
+-func ObjectToListFilter(of ObjectFilter) ListFilter {
+- if of == nil {
+- panic("programmer error: of ObjectFilter must not be nil in ObjectToListFilter")
+- }
+- return &objectToListFilter{of}
+-}
+-
+-type objectToListFilter struct {
+- of ObjectFilter
+-}
+-
+-// Filter implements ListFilter, but uses an ObjectFilter for the underlying logic.
+-func (f objectToListFilter) Filter(objs ...runtime.Object) (retarr []runtime.Object, err error) {
+- // Walk through all objects
+- for _, obj := range objs {
+- // Match them one-by-one against the ObjectFilter
+- match, err := f.of.Filter(obj)
+- if err != nil {
+- return nil, err
+- }
+- // If the object matches, include it in the return array
+- if match {
+- retarr = append(retarr, obj)
+- }
+- }
+- return
++ Match(obj client.Object) (bool, error)
+ }
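+
+For orientation, the new one-object-at-a-time contract is easy to satisfy; below is a minimal sketch of a custom ObjectFilter (AnnotationFilter and its Key field are hypothetical, not part of this change; only the Match signature above, controller-runtime's client.Object, and the package's ErrInvalidFilterParams are assumed):
+
+    package filter
+
+    import (
+    	"fmt"
+
+    	"sigs.k8s.io/controller-runtime/pkg/client"
+    )
+
+    // AnnotationFilter matches objects that carry the given annotation key.
+    type AnnotationFilter struct {
+    	Key string
+    }
+
+    // Match implements ObjectFilter.
+    func (f AnnotationFilter) Match(obj client.Object) (bool, error) {
+    	// Require f.Key to always be set, mirroring the built-in filters.
+    	if len(f.Key) == 0 {
+    		return false, fmt.Errorf("the AnnotationFilter.Key field must not be empty: %w", ErrInvalidFilterParams)
+    	}
+    	_, ok := obj.GetAnnotations()[f.Key]
+    	return ok, nil
+    }
+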
+diff --git a/pkg/filter/labels.go b/pkg/filter/labels.go
+new file mode 100644
+index 0000000..24ef9f1
+--- /dev/null
++++ b/pkg/filter/labels.go
+@@ -0,0 +1,46 @@
++package filter
++
++import (
++ "fmt"
++
++ "k8s.io/apimachinery/pkg/labels"
++ "sigs.k8s.io/controller-runtime/pkg/client"
++)
++
++// LabelsFilter implements ObjectFilter and FilterOption.
++// It also implements client.{List,DeleteAllOf}Option so
++// it can be passed into client.Client.{List,DeleteAllOf}
++// as a way to conveniently filter those lists.
++var _ ObjectFilter = LabelsFilter{}
++var _ FilterOption = LabelsFilter{}
++var _ client.ListOption = LabelsFilter{}
++var _ client.DeleteAllOfOption = LabelsFilter{}
++
++// LabelsFilter is an ObjectFilter that compares metav1.Object.GetLabels()
++// to the LabelSelector field.
++type LabelsFilter struct {
++	// LabelSelector filters results by label; construct a Selector from a
++	// raw selector string using labels.Parse from the imported package.
++ // +required
++ LabelSelector labels.Selector
++}
++
++// Match implements ObjectFilter
++func (f LabelsFilter) Match(obj client.Object) (bool, error) {
++	// Require f.LabelSelector to always be set.
++ if f.LabelSelector == nil {
++ return false, fmt.Errorf("the LabelsFilter.LabelSelector field must not be nil: %w", ErrInvalidFilterParams)
++ }
++
++ return f.LabelSelector.Matches(labels.Set(obj.GetLabels())), nil
++}
++
++// ApplyToList and ApplyToDeleteAllOf are no-op ("dummy") implementations, so that this struct
++// can be passed to client.Reader.List() and client.Writer.DeleteAllOf() as an option.
++func (f LabelsFilter) ApplyToList(_ *client.ListOptions) {}
++func (f LabelsFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {}
++
++// ApplyToFilterOptions implements FilterOption
++func (f LabelsFilter) ApplyToFilterOptions(target *FilterOptions) {
++ target.ObjectFilters = append(target.ObjectFilters, f)
++}
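+
+A short usage sketch for the new LabelsFilter; the ConfigMap is used only as a convenient client.Object, and the only assumption beyond this diff is labels.Parse from the apimachinery package already imported above:
+
+    package main
+
+    import (
+    	"fmt"
+
+    	"github.com/weaveworks/libgitops/pkg/filter"
+    	corev1 "k8s.io/api/core/v1"
+    	"k8s.io/apimachinery/pkg/labels"
+    )
+
+    func main() {
+    	// Parse a raw selector string into a labels.Selector.
+    	sel, err := labels.Parse("app=sample-app")
+    	if err != nil {
+    		panic(err)
+    	}
+
+    	// Any client.Object works; a ConfigMap is just a handy example.
+    	cm := &corev1.ConfigMap{}
+    	cm.SetLabels(map[string]string{"app": "sample-app"})
+
+    	matched, err := filter.LabelsFilter{LabelSelector: sel}.Match(cm)
+    	fmt.Println(matched, err) // true <nil>
+    }
+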
+diff --git a/pkg/filter/name.go b/pkg/filter/name.go
+index 42e516c..ade3d99 100644
+--- a/pkg/filter/name.go
++++ b/pkg/filter/name.go
+@@ -4,40 +4,36 @@ import (
+ "fmt"
+ "strings"
+
+- "github.com/weaveworks/libgitops/pkg/runtime"
++ "sigs.k8s.io/controller-runtime/pkg/client"
+ )
+
+-// NameFilter implements ObjectFilter and ListOption.
++// NameFilter implements ObjectFilter and FilterOption.
++// It also implements client.{List,DeleteAllOf}Option so
++// it can be passed into client.Client.{List,DeleteAllOf}
++// as a way to conveniently filter those lists.
+ var _ ObjectFilter = NameFilter{}
+-var _ ListOption = NameFilter{}
++var _ FilterOption = NameFilter{}
++var _ client.ListOption = NameFilter{}
++var _ client.DeleteAllOfOption = NameFilter{}
+
+-// NameFilter is an ObjectFilter that compares runtime.Object.GetName()
++// NameFilter is an ObjectFilter that compares Object.GetName()
+ // to the Name field by either equality or prefix.
+ type NameFilter struct {
+ // Name matches the object by .metadata.name.
+ // +required
+ Name string
+- // Namespace matches the object by .metadata.namespace. If left as
+- // an empty string, it is ignored when filtering.
+- // +optional
+- Namespace string
+- // MatchPrefix whether the name (not namespace) matching should be exact, or prefix-based.
++	// MatchPrefix specifies whether the name matching should be exact or prefix-based.
+ // +optional
+ MatchPrefix bool
+ }
+
+-// Filter implements ObjectFilter
+-func (f NameFilter) Filter(obj runtime.Object) (bool, error) {
++// Match implements ObjectFilter
++func (f NameFilter) Match(obj client.Object) (bool, error) {
+ // Require f.Name to always be set.
+ if len(f.Name) == 0 {
+ return false, fmt.Errorf("the NameFilter.Name field must not be empty: %w", ErrInvalidFilterParams)
+ }
+
+- // If f.Namespace is set, and it does not match the object, return false
+- if len(f.Namespace) > 0 && f.Namespace != obj.GetNamespace() {
+- return false, nil
+- }
+-
+ // If the Name should be matched by the prefix, use strings.HasPrefix
+ if f.MatchPrefix {
+ return strings.HasPrefix(obj.GetName(), f.Name), nil
+@@ -46,9 +42,12 @@ func (f NameFilter) Filter(obj runtime.Object) (bool, error) {
+ return f.Name == obj.GetName(), nil
+ }
+
+-// ApplyToListOptions implements ListOption, and adds itself converted to
+-// a ListFilter to ListOptions.Filters.
+-func (f NameFilter) ApplyToListOptions(target *ListOptions) error {
+- target.Filters = append(target.Filters, ObjectToListFilter(f))
+- return nil
++// ApplyToList and ApplyToDeleteAllOf are no-op ("dummy") implementations, so that this struct
++// can be passed to client.Reader.List() and client.Writer.DeleteAllOf() as an option.
++func (f NameFilter) ApplyToList(_ *client.ListOptions) {}
++func (f NameFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {}
++
++// ApplyToFilterOptions implements FilterOption
++func (f NameFilter) ApplyToFilterOptions(target *FilterOptions) {
++ target.ObjectFilters = append(target.ObjectFilters, f)
+ }
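+
+In use, the two modes of NameFilter read as follows (a sketch; the helper name and the "web" names are illustrative only, and the client/filter imports match the earlier sketches):
+
+    // matchesWebName reports whether obj's .metadata.name is exactly "web",
+    // or, with prefix set, whether it merely starts with "web-".
+    func matchesWebName(obj client.Object, prefix bool) (bool, error) {
+    	if prefix {
+    		return filter.NameFilter{Name: "web-", MatchPrefix: true}.Match(obj)
+    	}
+    	return filter.NameFilter{Name: "web"}.Match(obj)
+    }
+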
+diff --git a/pkg/filter/namespace.go b/pkg/filter/namespace.go
+new file mode 100644
+index 0000000..ae1c884
+--- /dev/null
++++ b/pkg/filter/namespace.go
+@@ -0,0 +1,45 @@
++package filter
++
++import (
++ "fmt"
++
++ "sigs.k8s.io/controller-runtime/pkg/client"
++)
++
++// NamespaceFilter implements ObjectFilter and FilterOption.
++// It also implements client.{List,DeleteAllOf}Option so
++// it can be passed into client.Client.{List,DeleteAllOf}
++// as a way to conveniently filter those lists.
++var _ ObjectFilter = NamespaceFilter{}
++var _ FilterOption = NamespaceFilter{}
++var _ client.ListOption = NamespaceFilter{}
++var _ client.DeleteAllOfOption = NamespaceFilter{}
++
++// NamespaceFilter is an ObjectFilter that compares Object.GetNamespace()
++// to the Namespace field.
++type NamespaceFilter struct {
++	// Namespace matches the object by .metadata.namespace. It must not be
++	// left empty; Match returns ErrInvalidFilterParams if it is.
++ // +required
++ Namespace string
++}
++
++// Match implements ObjectFilter
++func (f NamespaceFilter) Match(obj client.Object) (bool, error) {
++ // Require f.Namespace to always be set.
++ if len(f.Namespace) == 0 {
++ return false, fmt.Errorf("the NamespaceFilter.Namespace field must not be empty: %w", ErrInvalidFilterParams)
++ }
++ // Otherwise, just use an equality check
++ return f.Namespace == obj.GetNamespace(), nil
++}
++
++// ApplyToList and ApplyToDeleteAllOf are no-op ("dummy") implementations, so that this struct
++// can be passed to client.Reader.List() and client.Writer.DeleteAllOf() as an option.
++func (f NamespaceFilter) ApplyToList(_ *client.ListOptions) {}
++func (f NamespaceFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {}
++
++// ApplyToFilterOptions implements FilterOption
++func (f NamespaceFilter) ApplyToFilterOptions(target *FilterOptions) {
++ target.ObjectFilters = append(target.ObjectFilters, f)
++}
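+
+Since the Namespace field moved out of NameFilter, the old "name within a namespace" behavior is now the composition of the two filters; a possible sketch (the helper is hypothetical):
+
+    // matchNamespacedName reports whether obj is the object "<namespace>/<name>".
+    func matchNamespacedName(obj client.Object, namespace, name string) (bool, error) {
+    	for _, f := range []filter.ObjectFilter{
+    		filter.NamespaceFilter{Namespace: namespace},
+    		filter.NameFilter{Name: name},
+    	} {
+    		ok, err := f.Match(obj)
+    		if err != nil || !ok {
+    			return false, err
+    		}
+    	}
+    	return true, nil
+    }
+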
+diff --git a/pkg/filter/options.go b/pkg/filter/options.go
+index 4a831dd..6608da3 100644
+--- a/pkg/filter/options.go
++++ b/pkg/filter/options.go
+@@ -1,27 +1,56 @@
+ package filter
+
+-// ListOptions is a generic struct for listing options.
+-type ListOptions struct {
+- // Filters contains a chain of ListFilters, which will be processed in order and pipe the
+- // available objects through before returning.
+- Filters []ListFilter
++import "sigs.k8s.io/controller-runtime/pkg/client"
++
++// FilterOption is an interface for implementations that know how to
++// mutate FilterOptions.
++type FilterOption interface {
++ // ApplyToFilterOptions applies the configuration of the current object into a target FilterOptions struct.
++ ApplyToFilterOptions(target *FilterOptions)
+ }
+
+-// ListOption is an interface which can be passed into e.g. List() methods as a variadic-length
+-// argument list.
+-type ListOption interface {
+- // ApplyToListOptions applies the configuration of the current object into a target ListOptions struct.
+- ApplyToListOptions(target *ListOptions) error
++// FilterOptions is a set of options for filtering. It implements the ObjectFilter interface
++// itself, so it can be used as an aggregate of multiple ObjectFilters.
++type FilterOptions struct {
++ // ObjectFilters contains a set of filters for a single object. All of the filters must return
++	// true and a nil error for Match(obj) to return (true, nil).
++ ObjectFilters []ObjectFilter
+ }
+
+-// MakeListOptions makes a completed ListOptions struct from a list of ListOption implementations.
+-func MakeListOptions(opts ...ListOption) (*ListOptions, error) {
+- o := &ListOptions{}
+- for _, opt := range opts {
+- // For every option, apply it into o, and check if there's an error
+- if err := opt.ApplyToListOptions(o); err != nil {
+- return nil, err
++// Match matches the object against all the ObjectFilters.
++func (o *FilterOptions) Match(obj client.Object) (bool, error) {
++ for _, filter := range o.ObjectFilters {
++ matched, err := filter.Match(obj)
++ if err != nil {
++ return false, err
++ }
++ if !matched {
++ return false, nil
+ }
+ }
+- return o, nil
++ return true, nil
++}
++
++// ApplyToFilterOptions implements FilterOption
++func (o *FilterOptions) ApplyToFilterOptions(target *FilterOptions) {
++ target.ObjectFilters = append(target.ObjectFilters, o.ObjectFilters...)
++}
++
++// ApplyOptions applies each of the given FilterOption values to o, and returns o.
++func (o *FilterOptions) ApplyOptions(opts []FilterOption) *FilterOptions {
++ for _, opt := range opts {
++ opt.ApplyToFilterOptions(o)
++ }
++ return o
++}
++
++// ApplyOption applies one option that might implement FilterOption, but is
++// not statically guaranteed to at compile time. This is useful for lists of
++// other Option types that may also implement FilterOption, used in the
++// following way: for _, opt := range opts { filterOpts.ApplyOption(opt) }
++func (o *FilterOptions) ApplyOption(opt interface{}) *FilterOptions {
++ if fOpt, ok := opt.(FilterOption); ok {
++ fOpt.ApplyToFilterOptions(o)
++ }
++ return o
+ }
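+
+Tying the above together: a variadic list of FilterOption values folds into one FilterOptions, which then acts as a single combined ObjectFilter. A sketch of how a List-style code path might consume it (the helper is illustrative, not part of this change):
+
+    // filterObjects returns the subset of objs matching all of the given filters.
+    func filterObjects(objs []client.Object, opts ...filter.FilterOption) ([]client.Object, error) {
+    	fo := (&filter.FilterOptions{}).ApplyOptions(opts)
+    	var out []client.Object
+    	for _, obj := range objs {
+    		ok, err := fo.Match(obj)
+    		if err != nil {
+    			return nil, err
+    		}
+    		if ok {
+    			out = append(out, obj)
+    		}
+    	}
+    	return out, nil
+    }
+
+Backends that receive generic client options can run the ApplyOption loop quoted in the comment above to pick out the ones that also happen to implement FilterOption.
+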
+diff --git a/pkg/filter/uid.go b/pkg/filter/uid.go
+index eea48ff..1aedab3 100644
+--- a/pkg/filter/uid.go
++++ b/pkg/filter/uid.go
+@@ -1,25 +1,23 @@
+ package filter
+
+ import (
+- "errors"
+ "fmt"
+ "strings"
+
+- "github.com/weaveworks/libgitops/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
++ "sigs.k8s.io/controller-runtime/pkg/client"
+ )
+
+-var (
+- // ErrInvalidFilterParams describes an error where invalid parameters were given
+- // to a filter.
+- ErrInvalidFilterParams = errors.New("invalid parameters given to filter")
+-)
+-
+-// UIDFilter implements ObjectFilter and ListOption.
++// UIDFilter implements ObjectFilter and FilterOption.
++// It also implements client.{List,DeleteAllOf}Option so
++// it can be passed into client.Client.{List,DeleteAllOf}
++// as a way to conveniently filter those lists.
+ var _ ObjectFilter = UIDFilter{}
+-var _ ListOption = UIDFilter{}
++var _ FilterOption = UIDFilter{}
++var _ client.ListOption = UIDFilter{}
++var _ client.DeleteAllOfOption = UIDFilter{}
+
+-// UIDFilter is an ObjectFilter that compares runtime.Object.GetUID() to
++// UIDFilter is an ObjectFilter that compares Object.GetUID() to
+ // the UID field by either equality or prefix. The UID field is required,
+ // otherwise ErrInvalidFilterParams is returned.
+ type UIDFilter struct {
+@@ -31,8 +29,8 @@ type UIDFilter struct {
+ MatchPrefix bool
+ }
+
+-// Filter implements ObjectFilter
+-func (f UIDFilter) Filter(obj runtime.Object) (bool, error) {
++// Match implements ObjectFilter
++func (f UIDFilter) Match(obj client.Object) (bool, error) {
+ // Require f.UID to always be set.
+ if len(f.UID) == 0 {
+ return false, fmt.Errorf("the UIDFilter.UID field must not be empty: %w", ErrInvalidFilterParams)
+@@ -45,9 +43,12 @@ func (f UIDFilter) Filter(obj runtime.Object) (bool, error) {
+ return f.UID == obj.GetUID(), nil
+ }
+
+-// ApplyToListOptions implements ListOption, and adds itself converted to
+-// a ListFilter to ListOptions.Filters.
+-func (f UIDFilter) ApplyToListOptions(target *ListOptions) error {
+- target.Filters = append(target.Filters, ObjectToListFilter(f))
+- return nil
++// ApplyToList and ApplyToDeleteAllOf are no-op ("dummy") implementations, so that this struct
++// can be passed to client.Reader.List() and client.Writer.DeleteAllOf() as an option.
++func (f UIDFilter) ApplyToList(_ *client.ListOptions) {}
++func (f UIDFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {}
++
++// ApplyToFilterOptions implements FilterOption
++func (f UIDFilter) ApplyToFilterOptions(target *FilterOptions) {
++ target.ObjectFilters = append(target.ObjectFilters, f)
+ }
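+
+The UID analogue matches by a short prefix much like an abbreviated Git hash (a sketch; it assumes the UID field is of type types.UID, consistent with the retained apimachinery import and the equality check above):
+
+    // matchesUIDPrefix reports whether obj's UID starts with the given prefix.
+    func matchesUIDPrefix(obj client.Object, prefix string) (bool, error) {
+    	return filter.UIDFilter{UID: types.UID(prefix), MatchPrefix: true}.Match(obj)
+    }
+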
+diff --git a/pkg/gitdir/gitdir.go b/pkg/gitdir/gitdir.go
+deleted file mode 100644
+index a9eb0b7..0000000
+--- a/pkg/gitdir/gitdir.go
++++ /dev/null
+@@ -1,474 +0,0 @@
+-package gitdir
+-
+-import (
+- "context"
+- "errors"
+- "fmt"
+- "io/ioutil"
+- "os"
+- "sync"
+- "time"
+-
+- "github.com/fluxcd/go-git-providers/gitprovider"
+- git "github.com/go-git/go-git/v5"
+- "github.com/go-git/go-git/v5/plumbing"
+- "github.com/go-git/go-git/v5/plumbing/object"
+- log "github.com/sirupsen/logrus"
+- "k8s.io/apimachinery/pkg/util/wait"
+-)
+-
+-var (
+- // ErrNotStarted happens if you try to operate on the gitDirectory before you have started
+- // it with StartCheckoutLoop.
+- ErrNotStarted = errors.New("the gitDirectory hasn't been started (and hence, cloned) yet")
+- // ErrCannotWriteToReadOnly happens if you try to do a write operation for a non-authenticated Git repo.
+- ErrCannotWriteToReadOnly = errors.New("the gitDirectory is read-only, cannot write")
+-)
+-
+-const (
+- defaultBranch = "master"
+- defaultRemote = "origin"
+- defaultInterval = 30 * time.Second
+- defaultTimeout = 1 * time.Minute
+-)
+-
+-// GitDirectoryOptions provides options for the gitDirectory.
+-// TODO: Refactor this into the controller-runtime Options factory pattern.
+-type GitDirectoryOptions struct {
+- // Options
+- Branch string // default "master"
+- Interval time.Duration // default 30s
+- Timeout time.Duration // default 1m
+- // TODO: Support folder prefixes
+-
+- // Authentication
+- AuthMethod AuthMethod
+-}
+-
+-func (o *GitDirectoryOptions) Default() {
+- if o.Branch == "" {
+- o.Branch = defaultBranch
+- }
+- if o.Interval == 0 {
+- o.Interval = defaultInterval
+- }
+- if o.Timeout == 0 {
+- o.Timeout = defaultTimeout
+- }
+-}
+-
+-// GitDirectory is an abstraction layer for a temporary Git clone. It pulls
+-// and checks out new changes periodically in the background. It also allows
+-// high-level access to write operations, like creating a new branch, committing,
+-// and pushing.
+-type GitDirectory interface {
+- // Dir returns the backing temporary directory of the git clone.
+- Dir() string
+- // MainBranch returns the configured main branch.
+- MainBranch() string
+- // RepositoryRef returns the repository reference.
+- RepositoryRef() gitprovider.RepositoryRef
+-
+- // StartCheckoutLoop clones the repo synchronously, and then starts the checkout loop non-blocking.
+- // If the checkout loop has been started already, this is a no-op.
+- StartCheckoutLoop() error
+- // Suspend waits for any pending transactions or operations, and then locks the internal mutex so that
+- // no other operations can start. This means the periodic background checkout loop will momentarily stop.
+- Suspend()
+- // Resume unlocks the mutex locked in Suspend(), so that other Git operations, like the background checkout
+- // loop can resume its operation.
+- Resume()
+-
+- // Pull performs a pull & checkout to the latest revision.
+- // ErrNotStarted is returned if the repo hasn't been cloned yet.
+- Pull(ctx context.Context) error
+-
+- // CheckoutNewBranch creates a new branch and checks out to it.
+- // ErrNotStarted is returned if the repo hasn't been cloned yet.
+- CheckoutNewBranch(branchName string) error
+- // CheckoutMainBranch goes back to the main branch.
+- // ErrNotStarted is returned if the repo hasn't been cloned yet.
+- CheckoutMainBranch() error
+-
+- // Commit creates a commit of all changes in the current worktree with the given parameters.
+- // It also automatically pushes the branch after the commit.
+- // ErrNotStarted is returned if the repo hasn't been cloned yet.
+- // ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided.
+- Commit(ctx context.Context, authorName, authorEmail, msg string) error
+- // CommitChannel is a channel to where new observed Git SHAs are written.
+- CommitChannel() chan string
+-
+- // Cleanup terminates any pending operations, and removes the temporary directory.
+- Cleanup() error
+-}
+-
+-// Create a new GitDirectory implementation. In order to start using this, run StartCheckoutLoop().
+-func NewGitDirectory(repoRef gitprovider.RepositoryRef, opts GitDirectoryOptions) (GitDirectory, error) {
+- log.Info("Initializing the Git repo...")
+-
+- // Default the options
+- opts.Default()
+-
+- // Create a temporary directory for the clone
+- tmpDir, err := ioutil.TempDir("", "libgitops")
+- if err != nil {
+- return nil, err
+- }
+- log.Debugf("Created temporary directory for the git clone at %q", tmpDir)
+-
+- d := &gitDirectory{
+- repoRef: repoRef,
+- GitDirectoryOptions: opts,
+- cloneDir: tmpDir,
+- // TODO: This needs to be large, otherwise it can start blocking unnecessarily if nobody reads it
+- commitChan: make(chan string, 1024),
+- lock: &sync.Mutex{},
+- }
+- // Set up the parent context for this class. d.cancel() is called only at Cleanup()
+- d.ctx, d.cancel = context.WithCancel(context.Background())
+-
+- log.Trace("URL endpoint parsed and authentication method chosen")
+-
+- if d.canWrite() {
+- log.Infof("Running in read-write mode, will commit back current status to the repo")
+- } else {
+- log.Infof("Running in read-only mode, won't write status back to the repo")
+- }
+-
+- return d, nil
+-}
+-
+-// gitDirectory is an implementation which keeps a directory
+-type gitDirectory struct {
+- // user-specified options
+- repoRef gitprovider.RepositoryRef
+- GitDirectoryOptions
+-
+- // the temporary directory used for the clone
+- cloneDir string
+-
+- // go-git objects. wt is the worktree of the repo, persistent during the lifetime of repo.
+- repo *git.Repository
+- wt *git.Worktree
+-
+- // latest known commit to the system
+- lastCommit string
+- // events channel from new commits
+- commitChan chan string
+-
+- // the context and its cancel function for the lifetime of this struct (until Cleanup())
+- ctx context.Context
+- cancel context.CancelFunc
+- // the lock for git operations (so pushing and pulling aren't done simultaneously)
+- lock *sync.Mutex
+-}
+-
+-func (d *gitDirectory) Dir() string {
+- return d.cloneDir
+-}
+-
+-func (d *gitDirectory) MainBranch() string {
+- return d.Branch
+-}
+-
+-func (d *gitDirectory) RepositoryRef() gitprovider.RepositoryRef {
+- return d.repoRef
+-}
+-
+-// StartCheckoutLoop clones the repo synchronously, and then starts the checkout loop non-blocking.
+-// If the checkout loop has been started already, this is a no-op.
+-func (d *gitDirectory) StartCheckoutLoop() error {
+- if d.wt != nil {
+- return nil // already initialized
+- }
+- // First, clone the repo
+- if err := d.clone(); err != nil {
+- return err
+- }
+- go d.checkoutLoop()
+- return nil
+-}
+-
+-func (d *gitDirectory) Suspend() {
+- d.lock.Lock()
+-}
+-
+-func (d *gitDirectory) Resume() {
+- d.lock.Unlock()
+-}
+-
+-func (d *gitDirectory) CommitChannel() chan string {
+- return d.commitChan
+-}
+-
+-func (d *gitDirectory) checkoutLoop() {
+- log.Info("Starting the checkout loop...")
+-
+- wait.NonSlidingUntilWithContext(d.ctx, func(_ context.Context) {
+-
+- log.Trace("checkoutLoop: Will perform pull operation")
+- // Perform a pull & checkout of the new revision
+- if err := d.Pull(d.ctx); err != nil {
+- log.Errorf("checkoutLoop: git pull failed with error: %v", err)
+- return
+- }
+-
+- }, d.Interval)
+- log.Info("Exiting the checkout loop...")
+-}
+-
+-func (d *gitDirectory) cloneURL() string {
+- return d.repoRef.GetCloneURL(d.AuthMethod.TransportType())
+-}
+-
+-func (d *gitDirectory) canWrite() bool {
+- return d.AuthMethod != nil
+-}
+-
+-// verifyRead makes sure it's ok to start a read-something-from-git process
+-func (d *gitDirectory) verifyRead() error {
+- // Safeguard against not starting yet
+- if d.wt == nil {
+- return fmt.Errorf("cannot pull: %w", ErrNotStarted)
+- }
+- return nil
+-}
+-
+-// verifyWrite makes sure it's ok to start a write-something-to-git process
+-func (d *gitDirectory) verifyWrite() error {
+- // We need all read privileges first
+- if err := d.verifyRead(); err != nil {
+- return err
+- }
+- // Make sure we don't write to a possibly read-only repo
+- if !d.canWrite() {
+- return ErrCannotWriteToReadOnly
+- }
+- return nil
+-}
+-
+-func (d *gitDirectory) clone() error {
+- // Lock the mutex now that we're starting, and unlock it when exiting
+- d.lock.Lock()
+- defer d.lock.Unlock()
+-
+- log.Infof("Starting to clone the repository %s with timeout %s", d.repoRef, d.Timeout)
+- // Do a clone operation to the temporary directory, with a timeout
+- err := d.contextWithTimeout(d.ctx, func(ctx context.Context) error {
+- var err error
+- d.repo, err = git.PlainCloneContext(ctx, d.Dir(), false, &git.CloneOptions{
+- URL: d.cloneURL(),
+- Auth: d.AuthMethod,
+- RemoteName: defaultRemote,
+- ReferenceName: plumbing.NewBranchReferenceName(d.Branch),
+- SingleBranch: true,
+- NoCheckout: false,
+- //Depth: 1, // ref: https://github.com/src-d/go-git/issues/1143
+- RecurseSubmodules: 0,
+- Progress: nil,
+- Tags: git.NoTags,
+- })
+- return err
+- })
+- // Handle errors
+- switch err {
+- case nil:
+- // no-op, just continue.
+- case context.DeadlineExceeded:
+- return fmt.Errorf("git clone operation took longer than deadline %s", d.Timeout)
+- case context.Canceled:
+- log.Tracef("context was cancelled")
+- return nil // if Cleanup() was called, just exit the goroutine
+- default:
+- return fmt.Errorf("git clone error: %v", err)
+- }
+-
+- // Populate the worktree pointer
+- d.wt, err = d.repo.Worktree()
+- if err != nil {
+- return fmt.Errorf("git get worktree error: %v", err)
+- }
+-
+- // Get the latest HEAD commit and report it to the user
+- ref, err := d.repo.Head()
+- if err != nil {
+- return err
+- }
+-
+- d.observeCommit(ref.Hash())
+- return nil
+-}
+-
+-func (d *gitDirectory) Pull(ctx context.Context) error {
+- // Lock the mutex now that we're starting, and unlock it when exiting
+- d.lock.Lock()
+- defer d.lock.Unlock()
+-
+- // Make sure it's okay to read
+- if err := d.verifyRead(); err != nil {
+- return err
+- }
+-
+- // Perform the git pull operation using the timeout
+- err := d.contextWithTimeout(ctx, func(innerCtx context.Context) error {
+- log.Trace("checkoutLoop: Starting pull operation")
+- return d.wt.PullContext(innerCtx, &git.PullOptions{
+- Auth: d.AuthMethod,
+- SingleBranch: true,
+- })
+- })
+- // Handle errors
+- switch err {
+- case nil, git.NoErrAlreadyUpToDate:
+- // no-op, just continue. Allow the git.NoErrAlreadyUpToDate error
+- case context.DeadlineExceeded:
+- return fmt.Errorf("git pull operation took longer than deadline %s", d.Timeout)
+- case context.Canceled:
+- log.Tracef("context was cancelled")
+- return nil // if Cleanup() was called, just exit the goroutine
+- default:
+- return fmt.Errorf("failed to pull: %v", err)
+- }
+-
+- log.Trace("checkoutLoop: Pulled successfully")
+-
+- // get current head
+- ref, err := d.repo.Head()
+- if err != nil {
+- return err
+- }
+-
+- // check if we changed commits
+- if d.lastCommit != ref.Hash().String() {
+- // Notify upstream that we now have a new commit, and allow writing again
+- d.observeCommit(ref.Hash())
+- }
+-
+- return nil
+-}
+-
+-func (d *gitDirectory) CheckoutNewBranch(branchName string) error {
+- // Make sure it's okay to write
+- if err := d.verifyWrite(); err != nil {
+- return err
+- }
+-
+- return d.wt.Checkout(&git.CheckoutOptions{
+- Branch: plumbing.NewBranchReferenceName(branchName),
+- Create: true,
+- })
+-}
+-
+-func (d *gitDirectory) CheckoutMainBranch() error {
+- // Make sure it's okay to write
+- if err := d.verifyWrite(); err != nil {
+- return err
+- }
+-
+- // Best-effort clean
+- _ = d.wt.Clean(&git.CleanOptions{
+- Dir: true,
+- })
+- // Force-checkout the main branch
+- return d.wt.Checkout(&git.CheckoutOptions{
+- Branch: plumbing.NewBranchReferenceName(d.Branch),
+- Force: true,
+- })
+-}
+-
+-// observeCommit sets the lastCommit variable so that we know the latest state
+-func (d *gitDirectory) observeCommit(commit plumbing.Hash) {
+- d.lastCommit = commit.String()
+- d.commitChan <- commit.String()
+- log.Infof("New commit observed on branch %q: %s", d.Branch, commit)
+-}
+-
+-// Commit creates a commit of all changes in the current worktree with the given parameters.
+-// It also automatically pushes the branch after the commit.
+-// ErrNotStarted is returned if the repo hasn't been cloned yet.
+-// ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided.
+-func (d *gitDirectory) Commit(ctx context.Context, authorName, authorEmail, msg string) error {
+- // Make sure it's okay to write
+- if err := d.verifyWrite(); err != nil {
+- return err
+- }
+-
+- s, err := d.wt.Status()
+- if err != nil {
+- return fmt.Errorf("git status failed: %v", err)
+- }
+- if s.IsClean() {
+- log.Debugf("No changed files in git repo, nothing to commit...")
+- return nil
+- }
+-
+- // Do a commit and push
+- log.Debug("commitLoop: Committing all local changes")
+- hash, err := d.wt.Commit(msg, &git.CommitOptions{
+- All: true,
+- Author: &object.Signature{
+- Name: authorName,
+- Email: authorEmail,
+- When: time.Now(),
+- },
+- })
+- if err != nil {
+- return fmt.Errorf("git commit error: %v", err)
+- }
+-
+- // Perform the git push operation using the timeout
+- err = d.contextWithTimeout(ctx, func(innerCtx context.Context) error {
+- log.Debug("commitLoop: Will push with timeout")
+- return d.repo.PushContext(innerCtx, &git.PushOptions{
+- Auth: d.AuthMethod,
+- })
+- })
+- // Handle errors
+- switch err {
+- case nil, git.NoErrAlreadyUpToDate:
+- // no-op, just continue. Allow the git.NoErrAlreadyUpToDate error
+- case context.DeadlineExceeded:
+- return fmt.Errorf("git push operation took longer than deadline %s", d.Timeout)
+- case context.Canceled:
+- log.Tracef("context was cancelled")
+- return nil // if Cleanup() was called, just exit the goroutine
+- default:
+- return fmt.Errorf("failed to push: %v", err)
+- }
+-
+- // Notify upstream that we now have a new commit, and allow writing again
+- log.Infof("A new commit with the actual state has been created and pushed to the origin: %q", hash)
+- d.observeCommit(hash)
+- return nil
+-}
+-
+-func (d *gitDirectory) contextWithTimeout(ctx context.Context, fn func(context.Context) error) error {
+- // Create a new context with a timeout. The push operation either succeeds in time, times out,
+- // or is cancelled by Cleanup(). In case of a successful run, the context is always cancelled afterwards.
+- ctx, cancel := context.WithTimeout(ctx, d.Timeout)
+- defer cancel()
+-
+- // Run the function using the context and cancel directly afterwards
+- fnErr := fn(ctx)
+-
+- // Return the context error, if any, first so deadline/cancel signals can propagate.
+- // Otherwise passthrough the error returned from the function.
+- if ctx.Err() != nil {
+- log.Debugf("operation context yielded error %v to be returned. Function error was: %v", ctx.Err(), fnErr)
+- return ctx.Err()
+- }
+- return fnErr
+-}
+-
+-// Cleanup cancels running goroutines and operations, and removes the temporary clone directory
+-func (d *gitDirectory) Cleanup() error {
+- // Cancel the context for the two running goroutines, and any possible long-running operations
+- d.cancel()
+-
+- // Remove the temporary directory
+- if err := os.RemoveAll(d.Dir()); err != nil {
+- log.Errorf("Failed to clean up temp git directory: %v", err)
+- return err
+- }
+- return nil
+-}
+diff --git a/pkg/runtime/doc.go b/pkg/runtime/doc.go
+deleted file mode 100644
+index 4eb2a1e..0000000
+--- a/pkg/runtime/doc.go
++++ /dev/null
+@@ -1,2 +0,0 @@
+-// +k8s:deepcopy-gen=package
+-package runtime
+diff --git a/pkg/runtime/identifiers.go b/pkg/runtime/identifiers.go
+deleted file mode 100644
+index 87bc00e..0000000
+--- a/pkg/runtime/identifiers.go
++++ /dev/null
+@@ -1,63 +0,0 @@
+-package runtime
+-
+-import (
+- "fmt"
+-
+- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+-)
+-
+-// DefaultNamespace describes the default namespace name used for the system.
+-const DefaultNamespace = "default"
+-
+-// Identifyable is an object which can be identified
+-type Identifyable interface {
+- // GetIdentifier can return e.g. a "namespace/name" combination, which is not guaranteed
+- // to be unique world-wide, or alternatively a random SHA for instance
+- GetIdentifier() string
+-}
+-
+-type identifier string
+-
+-func (i identifier) GetIdentifier() string { return string(i) }
+-
+-type Metav1NameIdentifierFactory struct{}
+-
+-func (id Metav1NameIdentifierFactory) Identify(o interface{}) (Identifyable, bool) {
+- switch obj := o.(type) {
+- case metav1.Object:
+- if len(obj.GetNamespace()) == 0 || len(obj.GetName()) == 0 {
+- return nil, false
+- }
+- return NewIdentifier(fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName())), true
+- }
+- return nil, false
+-}
+-
+-type ObjectUIDIdentifierFactory struct{}
+-
+-func (id ObjectUIDIdentifierFactory) Identify(o interface{}) (Identifyable, bool) {
+- switch obj := o.(type) {
+- case Object:
+- if len(obj.GetUID()) == 0 {
+- return nil, false
+- }
+- // TODO: Make sure that runtime.APIType works with this
+- return NewIdentifier(string(obj.GetUID())), true
+- }
+- return nil, false
+-}
+-
+-var (
+- // Metav1Identifier identifies an object using its metav1.ObjectMeta Name and Namespace
+- Metav1NameIdentifier IdentifierFactory = Metav1NameIdentifierFactory{}
+- // ObjectUIDIdentifier identifies an object using its libgitops/pkg/runtime.ObjectMeta UID field
+- ObjectUIDIdentifier IdentifierFactory = ObjectUIDIdentifierFactory{}
+-)
+-
+-func NewIdentifier(str string) Identifyable {
+- return identifier(str)
+-}
+-
+-type IdentifierFactory interface {
+- Identify(o interface{}) (id Identifyable, ok bool)
+-}
+diff --git a/pkg/runtime/meta.go b/pkg/runtime/meta.go
+deleted file mode 100644
+index 32930e1..0000000
+--- a/pkg/runtime/meta.go
++++ /dev/null
+@@ -1,52 +0,0 @@
+-package runtime
+-
+-import (
+- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+- "k8s.io/apimachinery/pkg/runtime"
+- "sigs.k8s.io/yaml"
+-)
+-
+-// PartialObjectImpl is a struct implementing PartialObject, used for
+-// unmarshalling unknown objects into this intermediate type
+-// where .Name, .UID, .Kind and .APIVersion become easily available
+-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+-type PartialObjectImpl struct {
+- metav1.TypeMeta `json:",inline"`
+- metav1.ObjectMeta `json:"metadata"`
+-}
+-
+-func (po *PartialObjectImpl) IsPartialObject() {}
+-
+-// This constructor ensures the PartialObjectImpl fields are not nil.
+-// TODO: Make this multi-document-aware?
+-func NewPartialObject(frame []byte) (PartialObject, error) {
+- obj := &PartialObjectImpl{}
+-
+- // The yaml package supports both YAML and JSON. Don't use the serializer, as the APIType
+- // wrapper is not registered in any scheme.
+- if err := yaml.Unmarshal(frame, obj); err != nil {
+- return nil, err
+- }
+-
+- return obj, nil
+-}
+-
+-var _ Object = &PartialObjectImpl{}
+-var _ PartialObject = &PartialObjectImpl{}
+-
+-// Object is an union of the Object interfaces that are accessible for a
+-// type that embeds both metav1.TypeMeta and metav1.ObjectMeta.
+-type Object interface {
+- runtime.Object
+- metav1.ObjectMetaAccessor
+- metav1.Object
+-}
+-
+-// PartialObject is a partially-decoded object, where only metadata has been loaded.
+-type PartialObject interface {
+- Object
+-
+- // IsPartialObject is a dummy function for signalling that this is a partially-loaded object
+- // i.e. only TypeMeta and ObjectMeta are stored in memory.
+- IsPartialObject()
+-}
+diff --git a/pkg/runtime/zz_generated.deepcopy.go b/pkg/runtime/zz_generated.deepcopy.go
+deleted file mode 100644
+index 20beb72..0000000
+--- a/pkg/runtime/zz_generated.deepcopy.go
++++ /dev/null
+@@ -1,67 +0,0 @@
+-// +build !ignore_autogenerated
+-
+-// Code generated by deepcopy-gen. DO NOT EDIT.
+-
+-package runtime
+-
+-import (
+- pkgruntime "k8s.io/apimachinery/pkg/runtime"
+-)
+-
+-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+-func (in *Metav1NameIdentifierFactory) DeepCopyInto(out *Metav1NameIdentifierFactory) {
+- *out = *in
+- return
+-}
+-
+-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metav1NameIdentifierFactory.
+-func (in *Metav1NameIdentifierFactory) DeepCopy() *Metav1NameIdentifierFactory {
+- if in == nil {
+- return nil
+- }
+- out := new(Metav1NameIdentifierFactory)
+- in.DeepCopyInto(out)
+- return out
+-}
+-
+-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+-func (in *ObjectUIDIdentifierFactory) DeepCopyInto(out *ObjectUIDIdentifierFactory) {
+- *out = *in
+- return
+-}
+-
+-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectUIDIdentifierFactory.
+-func (in *ObjectUIDIdentifierFactory) DeepCopy() *ObjectUIDIdentifierFactory {
+- if in == nil {
+- return nil
+- }
+- out := new(ObjectUIDIdentifierFactory)
+- in.DeepCopyInto(out)
+- return out
+-}
+-
+-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+-func (in *PartialObjectImpl) DeepCopyInto(out *PartialObjectImpl) {
+- *out = *in
+- out.TypeMeta = in.TypeMeta
+- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+- return
+-}
+-
+-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectImpl.
+-func (in *PartialObjectImpl) DeepCopy() *PartialObjectImpl {
+- if in == nil {
+- return nil
+- }
+- out := new(PartialObjectImpl)
+- in.DeepCopyInto(out)
+- return out
+-}
+-
+-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new pkgruntime.Object.
+-func (in *PartialObjectImpl) DeepCopyObject() pkgruntime.Object {
+- if c := in.DeepCopy(); c != nil {
+- return c
+- }
+- return nil
+-}
+diff --git a/pkg/serializer/comments.go b/pkg/serializer/comments.go
+index 302c4db..a016939 100644
+--- a/pkg/serializer/comments.go
++++ b/pkg/serializer/comments.go
+@@ -27,7 +27,7 @@ var (
+ func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct ContentType) {
+ // If the user opted into preserving comments and the format is YAML, proceed
+ // If they didn't, return directly
+- if !(*d.opts.PreserveComments && ct == ContentTypeYAML) {
++ if !(d.opts.PreserveComments == PreserveCommentsStrict && ct == ContentTypeYAML) {
+ return
+ }
+
+@@ -41,7 +41,7 @@ func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct Conte
+ // tryToPreserveComments tries to locate the possibly-saved original file data in the object's annotation
+ func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw FrameWriter, obj runtime.Object, metaObj metav1.Object) error {
+ // If the user did not opt into preserving comments, just sanitize ObjectMeta temporarily and return
+- if !*e.opts.PreserveComments {
++ if e.opts.PreserveComments == PreserveCommentsDisable {
+ // Normal encoding without the annotation (so it doesn't leak by accident)
+ return noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, fw, obj))
+ }
+diff --git a/pkg/serializer/comments_test.go b/pkg/serializer/comments_test.go
+index 8f4c65c..6332e5c 100644
+--- a/pkg/serializer/comments_test.go
++++ b/pkg/serializer/comments_test.go
+@@ -18,8 +18,8 @@ kind: Test
+ spec:
+ # Head comment
+ data:
+- - field # Inline comment
+- - another
++ - field # Inline comment
++ - another
+ thing:
+ # Head comment
+ var: true
+@@ -29,9 +29,9 @@ const sampleData2 = `kind: Test
+ spec:
+ # Head comment
+ data:
+- - field # Inline comment
+- - another:
+- subthing: "yes"
++ - field # Inline comment
++ - another:
++ subthing: "yes"
+ thing:
+ # Head comment
+ var: true
+diff --git a/pkg/serializer/convertor.go b/pkg/serializer/convertor.go
+index bdea096..3fbc814 100644
+--- a/pkg/serializer/convertor.go
++++ b/pkg/serializer/convertor.go
+@@ -169,7 +169,8 @@ func (c *objectConvertor) ConvertToVersion(in runtime.Object, groupVersioner run
+ // as before, using the scheme's ConvertToVersion function. But if we don't want to convert the newly-decoded
+ // external object, we can just do nothing and the object will stay unconverted.
+ // doConversion is always true in the Encode codepath.
+- if !c.doConversion {
++ // Also, never convert unknown, partial metadata or unstructured objects (defined as "non-convertible").
++ if !c.doConversion || IsNonConvertible(in) {
+ // DeepCopy the object to make sure that although in would be somehow modified, it doesn't affect out
+ return in.DeepCopyObject(), nil
+ }
+diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go
+index 4feff21..7aee5af 100644
+--- a/pkg/serializer/decode.go
++++ b/pkg/serializer/decode.go
+@@ -5,119 +5,38 @@ import (
+ "io"
+ "reflect"
+
+- "github.com/weaveworks/libgitops/pkg/util"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+ "k8s.io/apimachinery/pkg/runtime/serializer/versioning"
+- "sigs.k8s.io/yaml"
++ serializeryaml "k8s.io/apimachinery/pkg/runtime/serializer/yaml"
+ )
+
+ // This is the groupversionkind for the v1.List object
+ var listGVK = metav1.Unversioned.WithKind("List")
+
+-type DecodingOptions struct {
+- // Not applicable for Decoder.DecodeInto(). If true, the decoded external object
+- // will be converted into its hub (or internal, where applicable) representation. Otherwise, the decoded
+- // object will be left in its external representation. (Default: false)
+- ConvertToHub *bool
+-
+- // Parse the YAML/JSON in strict mode, returning a specific error if the input
+- // contains duplicate or unknown fields or formatting errors. (Default: true)
+- Strict *bool
+-
+- // Automatically default the decoded object. (Default: false)
+- Default *bool
+-
+- // Only applicable for Decoder.DecodeAll(). If the underlying data contains a v1.List,
+- // the items of the list will be traversed, decoded into their respective types, and
+- // appended to the returned slice. The v1.List will in this case not be returned.
+- // This conversion does NOT support preserving comments. If the given scheme doesn't
+- // recognize the v1.List, before using it will be registered automatically. (Default: true)
+- DecodeListElements *bool
+-
+- // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta.
+- // Only applicable to ContentTypeYAML framers.
+- // Using any other framer will be silently ignored. Usage of this option also requires setting
+- // the PreserveComments in EncodingOptions, too. (Default: false)
+- PreserveComments *bool
+-
+- // DecodeUnknown specifies whether decode objects with an unknown GroupVersionKind into a
+- // *runtime.Unknown object when running Decode(All) (true value) or to return an error when
+- // any unrecognized type is found (false value). (Default: false)
+- DecodeUnknown *bool
+-}
+-
+-type DecodingOptionsFunc func(*DecodingOptions)
+-
+-func WithConvertToHubDecode(convert bool) DecodingOptionsFunc {
+- return func(opts *DecodingOptions) {
+- opts.ConvertToHub = &convert
+- }
+-}
+-
+-func WithStrictDecode(strict bool) DecodingOptionsFunc {
+- return func(opts *DecodingOptions) {
+- opts.Strict = &strict
+- }
+-}
+-
+-func WithDefaultsDecode(defaults bool) DecodingOptionsFunc {
+- return func(opts *DecodingOptions) {
+- opts.Default = &defaults
+- }
+-}
+-
+-func WithListElementsDecoding(listElements bool) DecodingOptionsFunc {
+- return func(opts *DecodingOptions) {
+- opts.DecodeListElements = &listElements
+- }
+-}
++// TODO: Consider taking the DecodeOptions at Decode time instead, as a variadic
++// Option slice. That would probably require caching the *json.Serializer and
++// runtime.Decoder for each set of options used, though.
+
+-func WithCommentsDecode(comments bool) DecodingOptionsFunc {
+- return func(opts *DecodingOptions) {
+- opts.PreserveComments = &comments
+- }
+-}
+-
+-func WithUnknownDecode(unknown bool) DecodingOptionsFunc {
+- return func(opts *DecodingOptions) {
+- opts.DecodeUnknown = &unknown
+- }
+-}
+-
+-func WithDecodingOptions(newOpts DecodingOptions) DecodingOptionsFunc {
+- return func(opts *DecodingOptions) {
+- // TODO: Null-check all of these before using them
+- *opts = newOpts
+- }
+-}
++func newDecoder(schemeAndCodec *schemeAndCodec, opts DecodeOptions) Decoder {
++ // Allow both YAML and JSON inputs (JSON is a subset of YAML), and deserialize in strict mode
++ s := json.NewSerializerWithOptions(json.DefaultMetaFactory, schemeAndCodec.scheme, schemeAndCodec.scheme, json.SerializerOptions{
++ Yaml: true,
++ Strict: *opts.Strict,
++ })
+
+-func defaultDecodeOpts() *DecodingOptions {
+- return &DecodingOptions{
+- ConvertToHub: util.BoolPtr(false),
+- Strict: util.BoolPtr(true),
+- Default: util.BoolPtr(false),
+- DecodeListElements: util.BoolPtr(true),
+- PreserveComments: util.BoolPtr(false),
+- DecodeUnknown: util.BoolPtr(false),
+- }
+-}
++ decodeCodec := decoderForVersion(schemeAndCodec.scheme, s, *opts.Default, *opts.ConvertToHub)
+
+-func newDecodeOpts(fns ...DecodingOptionsFunc) *DecodingOptions {
+- opts := defaultDecodeOpts()
+- for _, fn := range fns {
+- fn(opts)
+- }
+- return opts
++ return &decoder{schemeAndCodec, decodeCodec, opts}
+ }
+
+ type decoder struct {
+ *schemeAndCodec
+
+ decoder runtime.Decoder
+- opts DecodingOptions
++ opts DecodeOptions
+ }
+
+ // Decode returns the decoded object from the next document in the FrameReader stream.
+@@ -149,8 +68,14 @@ func (d *decoder) Decode(fr FrameReader) (runtime.Object, error) {
+ func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runtime.Object, error) {
+ // If the scheme doesn't recognize a v1.List, and we enabled opts.DecodeListElements,
+ // make the scheme able to decode the v1.List automatically
+- if *d.opts.DecodeListElements && !d.scheme.Recognizes(listGVK) {
+- d.scheme.AddKnownTypes(metav1.Unversioned, &metav1.List{})
++ if *d.opts.DecodeListElements {
++ // As .AddKnownTypes writes to the scheme, guard both the check and the write
++ // with a mutex.
++ d.schemeMu.Lock()
++ if !d.scheme.Recognizes(listGVK) {
++ d.scheme.AddKnownTypes(metav1.Unversioned, &metav1.List{})
++ }
++ d.schemeMu.Unlock()
+ }
+
+ // Record if this decode call should have runtime.DecodeInto-functionality
+@@ -268,7 +193,7 @@ func (d *decoder) decodeUnknown(doc []byte, ct ContentType) (runtime.Object, err
+
+ func (d *decoder) handleDecodeError(doc []byte, origErr error) error {
+ // Parse the document's TypeMeta information
+- gvk, err := extractYAMLTypeMeta(doc)
++ gvk, err := serializeryaml.DefaultMetaFactory.Interpret(doc)
+ if err != nil {
+ return fmt.Errorf("failed to interpret TypeMeta from the given the YAML: %v. Decode error was: %w", err, origErr)
+ }
+@@ -320,18 +245,6 @@ func (d *decoder) extractNestedObjects(obj runtime.Object, ct ContentType) ([]ru
+ return objs, nil
+ }
+
+-func newDecoder(schemeAndCodec *schemeAndCodec, opts DecodingOptions) Decoder {
+- // Allow both YAML and JSON inputs (JSON is a subset of YAML), and deserialize in strict mode
+- s := json.NewSerializerWithOptions(json.DefaultMetaFactory, schemeAndCodec.scheme, schemeAndCodec.scheme, json.SerializerOptions{
+- Yaml: true,
+- Strict: *opts.Strict,
+- })
+-
+- decodeCodec := decoderForVersion(schemeAndCodec.scheme, s, *opts.Default, *opts.ConvertToHub)
+-
+- return &decoder{schemeAndCodec, decodeCodec, opts}
+-}
+-
+ // decoderForVersion is used instead of CodecFactory.DecoderForVersion, as we want to use our own converter
+ func decoderForVersion(scheme *runtime.Scheme, decoder *json.Serializer, doDefaulting, doConversion bool) runtime.Decoder {
+ return newConversionCodecForScheme(
+@@ -361,20 +274,38 @@ func newConversionCodecForScheme(
+ defaulter = scheme
+ }
+ convertor := newObjectConvertor(scheme, performConversion)
+- return versioning.NewCodec(encoder, decoder, convertor, scheme, scheme, defaulter, encodeVersion, decodeVersion, scheme.Name())
++ // a typer that recognizes metav1.PartialObjectMetadata{,List}
++ typer := &customTyper{scheme}
++ return versioning.NewCodec(encoder, decoder, convertor, scheme, typer, defaulter, encodeVersion, decodeVersion, scheme.Name())
+ }
+
+-// TODO: Use https://github.com/kubernetes/apimachinery/blob/master/pkg/runtime/serializer/yaml/meta.go
+-// when we can assume everyone is vendoring k8s v1.19
+-func extractYAMLTypeMeta(data []byte) (*schema.GroupVersionKind, error) {
+- typeMeta := runtime.TypeMeta{}
+- if err := yaml.Unmarshal(data, &typeMeta); err != nil {
+- return nil, fmt.Errorf("could not interpret GroupVersionKind: %w", err)
+- }
+- gv, err := schema.ParseGroupVersion(typeMeta.APIVersion)
+- if err != nil {
+- return nil, err
++var _ runtime.ObjectTyper = &customTyper{}
++
++type customTyper struct {
++ scheme *runtime.Scheme
++}
++
++// ObjectKinds is an extension to the native Scheme.ObjectKinds function, that also
++// recognizes partial matadata objects and lists. The logic here follows closely the
++// scheme's own logic.
++func (t *customTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) {
++ // Partial objects are always fine to encode/decode as-is when their GVK is set;
++ // similar code exists in runtime.Scheme.ObjectKinds for reference.
++ if IsPartialObject(obj) || IsPartialObjectList(obj) {
++ // we require that the GVK be populated in order to recognize the object
++ gvk := obj.GetObjectKind().GroupVersionKind()
++ if len(gvk.Kind) == 0 {
++ return nil, false, runtime.NewMissingKindErr("unstructured object has no kind")
++ }
++ if len(gvk.Version) == 0 {
++ return nil, false, runtime.NewMissingVersionErr("unstructured object has no version")
++ }
++ return []schema.GroupVersionKind{gvk}, false, nil
+ }
+- gvk := gv.WithKind(typeMeta.Kind)
+- return &gvk, nil
++ return t.scheme.ObjectKinds(obj)
++}
++
++// Recognizes just calls the underlying Scheme.Recognizes
++func (t *customTyper) Recognizes(gvk schema.GroupVersionKind) bool {
++ return t.scheme.Recognizes(gvk)
+ }
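++
++// Illustrative sketch, not part of this change: with customTyper in place, a
++// metav1.PartialObjectMetadata whose TypeMeta is populated can be round-tripped
++// through the versioning codec without being registered in the scheme. Here s
++// is an assumed Serializer instance, and gvk is an assumed, caller-provided
++// GroupVersionKind.
++//
++//	partial := &metav1.PartialObjectMetadata{}
++//	partial.SetGroupVersionKind(gvk) // hypothetical, caller-provided GVK
++//	partial.SetName("my-object")
++//	var buf bytes.Buffer
++//	err := s.Encoder().EncodeForGroupVersion(NewJSONFrameWriter(&buf), partial, gvk.GroupVersion())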
+diff --git a/pkg/serializer/encode.go b/pkg/serializer/encode.go
+index 7706193..a06bd8c 100644
+--- a/pkg/serializer/encode.go
++++ b/pkg/serializer/encode.go
+@@ -1,73 +1,25 @@
+ package serializer
+
+ import (
+- "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/util"
++ "bytes"
++ "encoding/json"
++ "strings"
++
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ )
+
+-type EncodingOptions struct {
+- // Use pretty printing when writing to the output. (Default: true)
+- // TODO: Fix that sometimes omitempty fields aren't respected
+- Pretty *bool
+- // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta.
+- // Only applicable to ContentTypeYAML framers.
+- // Using any other framer will be silently ignored. Usage of this option also requires setting
+- // the PreserveComments in DecodingOptions, too. (Default: false)
+- // TODO: Make this a BestEffort & Strict mode
+- PreserveComments *bool
+-
+- // TODO: Maybe consider an option to always convert to the preferred version (not just internal)
+-}
+-
+-type EncodingOptionsFunc func(*EncodingOptions)
+-
+-func WithPrettyEncode(pretty bool) EncodingOptionsFunc {
+- return func(opts *EncodingOptions) {
+- opts.Pretty = &pretty
+- }
+-}
+-
+-func WithCommentsEncode(comments bool) EncodingOptionsFunc {
+- return func(opts *EncodingOptions) {
+- opts.PreserveComments = &comments
+- }
+-}
+-
+-func WithEncodingOptions(newOpts EncodingOptions) EncodingOptionsFunc {
+- return func(opts *EncodingOptions) {
+- // TODO: Null-check all of these before using them
+- *opts = newOpts
+- }
+-}
+-
+-func defaultEncodeOpts() *EncodingOptions {
+- return &EncodingOptions{
+- Pretty: util.BoolPtr(true),
+- PreserveComments: util.BoolPtr(false),
+- }
+-}
+-
+-func newEncodeOpts(fns ...EncodingOptionsFunc) *EncodingOptions {
+- opts := defaultEncodeOpts()
+- for _, fn := range fns {
+- fn(opts)
++func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodeOptions) Encoder {
++ return &encoder{
++ schemeAndCodec,
++ opts,
+ }
+- return opts
+ }
+
+ type encoder struct {
+ *schemeAndCodec
+
+- opts EncodingOptions
+-}
+-
+-func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodingOptions) Encoder {
+- return &encoder{
+- schemeAndCodec,
+- opts,
+- }
++ opts EncodeOptions
+ }
+
+ // Encode encodes the given objects and writes them to the specified FrameWriter.
+@@ -75,6 +27,7 @@ func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodingOptions) Encoder {
+ // internal object given to the preferred external groupversion. No conversion will happen
+ // if the given object is of an external version.
+ // TODO: This should automatically convert to the preferred version
++// TODO: Fix that sometimes omitempty fields aren't respected
+ func (e *encoder) Encode(fw FrameWriter, objs ...runtime.Object) error {
+ for _, obj := range objs {
+ // Get the kind for the given object
+@@ -110,23 +63,23 @@ func (e *encoder) EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv s
+ return ErrUnsupportedContentType
+ }
+
+- // Choose the pretty or non-pretty one
++ // Choose the default, non-pretty serializer; we prettify later if needed.
++ // We technically could use the JSON PrettySerializer here, but it does not catch the
++ // cases where the JSON iterator invokes MarshalJSON() on an object, and that object
++ // returns non-pretty bytes (e.g. *unstructured.Unstructured). Hence, it is more robust
++ // and extensible to always use the non-pretty serializer, and only indent the output
++ // by the requested number of spaces after JSON encoding.
+ encoder := serializerInfo.Serializer
+
+- // Use the pretty serializer if it was asked for and is defined for the content type
+- if *e.opts.Pretty {
+- // Apparently not all SerializerInfos have this field defined (e.g. YAML)
+- // TODO: This could be considered a bug in upstream, create an issue
+- if serializerInfo.PrettySerializer != nil {
+- encoder = serializerInfo.PrettySerializer
+- } else {
+- logrus.Debugf("PrettySerializer for ContentType %s is nil, falling back to Serializer.", fw.ContentType())
+- }
+- }
+-
+ // Get a version-specific encoder for the specified groupversion
+ versionEncoder := encoderForVersion(e.scheme, encoder, gv)
+
++ // Check if the user requested prettified JSON output.
++ // If the ContentType is JSON this is OK; we will indent the encoded output on the fly.
++ if *e.opts.JSONIndent > 0 && fw.ContentType() == ContentTypeJSON {
++ fw = &jsonPrettyFrameWriter{indent: *e.opts.JSONIndent, fw: fw}
++ }
++
+ // Cast the object to a metav1.Object to get access to annotations
+ metaobj, ok := toMetaObject(obj)
+ // For objects without ObjectMeta, the cast will fail. Allow that failure and do "normal" encoding
+@@ -150,3 +103,24 @@ func encoderForVersion(scheme *runtime.Scheme, encoder runtime.Encoder, gv schem
+ true, // convert if needed before encode
+ )
+ }
++
++type jsonPrettyFrameWriter struct {
++ indent int
++ fw FrameWriter
++}
++
++func (w *jsonPrettyFrameWriter) Write(p []byte) (n int, err error) {
++ // Indent the source bytes
++ var indented bytes.Buffer
++ err = json.Indent(&indented, p, "", strings.Repeat(" ", w.indent))
++ if err != nil {
++ return
++ }
++ // Write the pretty bytes to the underlying writer
++ n, err = w.fw.Write(indented.Bytes())
++ return
++}
++
++func (w *jsonPrettyFrameWriter) ContentType() ContentType {
++ return w.fw.ContentType()
++}
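++
++// Illustrative sketch, not part of this change: requesting a custom indent.
++// With the defaults the encoder already indents JSON by two spaces; here the
++// jsonPrettyFrameWriter wrapper indents by four instead. s (a Serializer) and
++// obj (a typed object) are assumed to be set up by the caller.
++//
++//	var buf bytes.Buffer
++//	enc := s.Encoder(JSONIndent(4))
++//	err := enc.Encode(NewJSONFrameWriter(&buf), obj)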
+diff --git a/pkg/serializer/frame_reader.go b/pkg/serializer/frame_reader.go
+index 26ead8d..a2ba308 100644
+--- a/pkg/serializer/frame_reader.go
++++ b/pkg/serializer/frame_reader.go
+@@ -6,6 +6,7 @@ import (
+ "io"
+ "io/ioutil"
+ "os"
++ "sync"
+
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+ )
+@@ -71,6 +72,7 @@ func NewJSONFrameReader(rc ReadCloser) FrameReader {
+ func newFrameReader(rc io.ReadCloser, contentType ContentType) *frameReader {
+ return &frameReader{
+ rc: rc,
++ rcMu: &sync.Mutex{},
+ bufSize: defaultBufSize,
+ maxFrameSize: defaultMaxFrameSize,
+ contentType: contentType,
+@@ -79,12 +81,13 @@ func newFrameReader(rc io.ReadCloser, contentType ContentType) *frameReader {
+
+ // frameReader is a FrameReader implementation
+ type frameReader struct {
+- rc io.ReadCloser
++ // the underlying readcloser and the mutex that guards it
++ rc io.ReadCloser
++ rcMu *sync.Mutex
++
+ bufSize int
+ maxFrameSize int
+ contentType ContentType
+-
+- // TODO: Maybe add mutexes for thread-safety (so no two goroutines read at the same time)
+ }
+
+ // ReadFrame reads one frame from the underlying io.Reader. ReadFrame
+@@ -93,6 +96,10 @@ type frameReader struct {
+ // ReadFrame keeps on reading using new calls. ReadFrame might return both data and
+ // io.EOF. io.EOF will be returned in the final call.
+ func (rf *frameReader) ReadFrame() (frame []byte, err error) {
++ // Only one actor can read at a time
++ rf.rcMu.Lock()
++ defer rf.rcMu.Unlock()
++
+ // Temporary buffer to read parts of a frame into
+ var buf []byte
+ // How many bytes were read by the read call
+@@ -149,6 +156,10 @@ func (rf *frameReader) ContentType() ContentType {
+
+ // Close implements io.Closer and closes the underlying ReadCloser
+ func (rf *frameReader) Close() error {
++ // Only one actor can access rf.rc at a time
++ rf.rcMu.Lock()
++ defer rf.rcMu.Unlock()
++
+ return rf.rc.Close()
+ }
+
+@@ -166,3 +177,42 @@ func FromFile(filePath string) ReadCloser {
+ func FromBytes(content []byte) ReadCloser {
+ return ioutil.NopCloser(bytes.NewReader(content))
+ }
++
++// NewSingleFrameReader returns a FrameReader for only a single frame of
++// the specified content type. This avoids overhead if it is known that the
++// byte array only contains one frame. The given frame is returned in
++// whole in the first ReadFrame() call, and io.EOF is returned in all future
++// invocations. This FrameReader works for any ContentType and transparently
++// exposes what was given through the ContentType() method.
++func NewSingleFrameReader(b []byte, ct ContentType) FrameReader {
++ return &singleFrameReader{
++ ct: ct,
++ b: b,
++ hasBeenRead: false,
++ hasBeenReadMu: &sync.Mutex{},
++ }
++}
++
++var _ FrameReader = &singleFrameReader{}
++
++type singleFrameReader struct {
++ ct ContentType
++ b []byte
++ hasBeenRead bool
++ hasBeenReadMu *sync.Mutex
++}
++
++func (r *singleFrameReader) ReadFrame() ([]byte, error) {
++ r.hasBeenReadMu.Lock()
++ defer r.hasBeenReadMu.Unlock()
++ // If ReadFrame() has been called once, just return io.EOF.
++ if r.hasBeenRead {
++ return nil, io.EOF
++ }
++ // The first time, mark that we've read, and return the single frame
++ r.hasBeenRead = true
++ return r.b, nil
++}
++
++func (r *singleFrameReader) ContentType() ContentType { return r.ct }
++func (r *singleFrameReader) Close() error { return nil }
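++
++// Illustrative sketch, not part of this change: the single-frame fast path.
++// The whole byte slice comes back from the first ReadFrame() call, and every
++// later call returns io.EOF.
++//
++//	fr := NewSingleFrameReader([]byte("kind: Test\napiVersion: foo/v1\n"), ContentTypeYAML)
++//	frame, err := fr.ReadFrame() // frame == the whole input, err == nil
++//	_, err = fr.ReadFrame()      // err == io.EOF from now on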
+diff --git a/pkg/serializer/frame_reader_test.go b/pkg/serializer/frame_reader_test.go
+index a696ed7..063ed8a 100644
+--- a/pkg/serializer/frame_reader_test.go
++++ b/pkg/serializer/frame_reader_test.go
+@@ -5,6 +5,7 @@ import (
+ "io/ioutil"
+ "reflect"
+ "strings"
++ "sync"
+ "testing"
+
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+@@ -92,6 +93,7 @@ func Test_FrameReader_ReadFrame(t *testing.T) {
+ t.Run(tt.name, func(t *testing.T) {
+ rf := &frameReader{
+ rc: tt.fields.rc,
++ rcMu: &sync.Mutex{},
+ bufSize: tt.fields.bufSize,
+ maxFrameSize: tt.fields.maxFrameSize,
+ }
+diff --git a/pkg/serializer/options.go b/pkg/serializer/options.go
+new file mode 100644
+index 0000000..e5736e6
+--- /dev/null
++++ b/pkg/serializer/options.go
+@@ -0,0 +1,258 @@
++package serializer
++
++import (
++ "k8s.io/utils/pointer"
++)
++
++// TODO: Import k8s.io/utils/pointer instead of baking our own ptrutils package.
++
++type EncodeOption interface {
++ ApplyToEncode(*EncodeOptions)
++}
++
++func defaultEncodeOpts() *EncodeOptions {
++ return &EncodeOptions{
++ // Default to "pretty encoding"
++ JSONIndent: pointer.Int32Ptr(2),
++ PreserveComments: PreserveCommentsDisable,
++ }
++}
++
++type EncodeOptions struct {
++ // Indent JSON encoding output with this many spaces.
++ // Set this to 0 (e.g. via PrettyEncode(false) or JSONIndent(0)) to disable pretty output.
++ // Only applicable to ContentTypeJSON framers.
++ //
++ // Default: 2, i.e. pretty output
++ // TODO: Make this a property of the FrameWriter instead?
++ JSONIndent *int32
++
++ // Whether to preserve YAML comments internally.
++ // This only works for objects embedding metav1.ObjectMeta.
++ //
++ // Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored.
++ //
++ // Usage of this option also requires setting the PreserveComments in DecodeOptions, too.
++ //
++ // Default: PreserveCommentsDisable
++ PreserveComments PreserveComments
++
++ // TODO: Maybe consider an option to always convert to the preferred version (not just internal)
++}
++
++var _ EncodeOption = &EncodeOptions{}
++
++func (o *EncodeOptions) ApplyToEncode(target *EncodeOptions) {
++ if o.JSONIndent != nil {
++ target.JSONIndent = o.JSONIndent
++ }
++ if o.PreserveComments != 0 {
++ target.PreserveComments = o.PreserveComments
++ }
++}
++
++func (o *EncodeOptions) ApplyOptions(opts []EncodeOption) *EncodeOptions {
++ for _, opt := range opts {
++ opt.ApplyToEncode(o)
++ }
++ // it is guaranteed that all options are non-nil, as defaultEncodeOpts() includes all fields
++ return o
++}
++
++// Whether to preserve YAML comments internally.
++// This only works for objects embedding metav1.ObjectMeta.
++//
++// Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored.
++// TODO: Add a BestEffort mode
++type PreserveComments int
++
++const (
++ // PreserveCommentsDisable means do not try to preserve comments
++ PreserveCommentsDisable PreserveComments = 1 + iota
++ // PreserveCommentsStrict means try to preserve comments, and fail if it does not work
++ PreserveCommentsStrict
++)
++
++var _ EncodeOption = PreserveComments(0)
++var _ DecodeOption = PreserveComments(0)
++
++func (p PreserveComments) ApplyToEncode(target *EncodeOptions) {
++ // TODO: Validate?
++ target.PreserveComments = p
++}
++
++func (p PreserveComments) ApplyToDecode(target *DecodeOptions) {
++ // TODO: Validate?
++ target.PreserveComments = p
++}
++
++// Indent JSON encoding output with this many spaces.
++// Use PrettyEncode(false) or JSONIndent(0) to disable pretty output.
++// Only applicable to ContentTypeJSON framers.
++type JSONIndent int32
++
++var _ EncodeOption = JSONIndent(0)
++
++func (i JSONIndent) ApplyToEncode(target *EncodeOptions) {
++ target.JSONIndent = pointer.Int32Ptr(int32(i))
++}
++
++// Shorthand for JSONIndent(0) if false, or JSONIndent(2) if true
++type PrettyEncode bool
++
++var _ EncodeOption = PrettyEncode(false)
++
++func (pretty PrettyEncode) ApplyToEncode(target *EncodeOptions) {
++ if pretty {
++ JSONIndent(2).ApplyToEncode(target)
++ } else {
++ JSONIndent(0).ApplyToEncode(target)
++ }
++}
++
++// DECODING
++
++type DecodeOption interface {
++ ApplyToDecode(*DecodeOptions)
++}
++
++func defaultDecodeOpts() *DecodeOptions {
++ return &DecodeOptions{
++ ConvertToHub: pointer.BoolPtr(false),
++ Strict: pointer.BoolPtr(true),
++ Default: pointer.BoolPtr(false),
++ DecodeListElements: pointer.BoolPtr(true),
++ PreserveComments: PreserveCommentsDisable,
++ DecodeUnknown: pointer.BoolPtr(false),
++ }
++}
++
++type DecodeOptions struct {
++ // Not applicable for Decoder.DecodeInto(). If true, the decoded external object
++ // will be converted into its hub (or internal, where applicable) representation.
++ // Otherwise, the decoded object will be left in its external representation.
++ //
++ // Default: false
++ ConvertToHub *bool
++
++ // Parse the YAML/JSON in strict mode, returning a specific error if the input
++ // contains duplicate or unknown fields or formatting errors.
++ //
++ // Default: true
++ Strict *bool
++
++ // Automatically default the decoded object.
++ // Default: false
++ Default *bool
++
++ // Only applicable for Decoder.DecodeAll(). If the underlying data contains a v1.List,
++ // the items of the list will be traversed, decoded into their respective types, and
++ // appended to the returned slice. The v1.List will in this case not be returned.
++ // This conversion does NOT support preserving comments. If the given scheme doesn't
++ // recognize the v1.List, it will be registered automatically before use.
++ //
++ // Default: true
++ DecodeListElements *bool
++
++ // Whether to preserve YAML comments internally.
++ // This only works for objects embedding metav1.ObjectMeta.
++ //
++ // Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored.
++ //
++ // Usage of this option also requires setting the PreserveComments in EncodeOptions, too.
++ //
++ // Default: PreserveCommentsDisable
++ PreserveComments PreserveComments
++
++ // DecodeUnknown specifies whether to decode objects with an unknown GroupVersionKind into a
++ // *runtime.Unknown object when running Decode(All) (true value), or to return an error when
++ // any unrecognized type is found (false value).
++ //
++ // Default: false
++ DecodeUnknown *bool
++}
++
++var _ DecodeOption = &DecodeOptions{}
++
++func (o *DecodeOptions) ApplyToDecode(target *DecodeOptions) {
++ if o.ConvertToHub != nil {
++ target.ConvertToHub = o.ConvertToHub
++ }
++ if o.Strict != nil {
++ target.Strict = o.Strict
++ }
++ if o.Default != nil {
++ target.Default = o.Default
++ }
++ if o.DecodeListElements != nil {
++ target.DecodeListElements = o.DecodeListElements
++ }
++ if o.PreserveComments != 0 {
++ target.PreserveComments = o.PreserveComments
++ }
++ if o.DecodeUnknown != nil {
++ target.DecodeUnknown = o.DecodeUnknown
++ }
++}
++
++func (o *DecodeOptions) ApplyOptions(opts []DecodeOption) *DecodeOptions {
++ for _, opt := range opts {
++ opt.ApplyToDecode(o)
++ }
++ // it is guaranteed that all options are non-nil, as defaultDecodeOpts() includes all fields
++ return o
++}
++
++// Not applicable for Decoder.DecodeInto(). If true, the decoded external object
++// will be converted into its hub (or internal, where applicable) representation.
++// Otherwise, the decoded object will be left in its external representation.
++type ConvertToHub bool
++
++var _ DecodeOption = ConvertToHub(false)
++
++func (b ConvertToHub) ApplyToDecode(target *DecodeOptions) {
++ target.ConvertToHub = pointer.BoolPtr(bool(b))
++}
++
++// Parse the YAML/JSON in strict mode, returning a specific error if the input
++// contains duplicate or unknown fields or formatting errors.
++type DecodeStrict bool
++
++var _ DecodeOption = DecodeStrict(false)
++
++func (b DecodeStrict) ApplyToDecode(target *DecodeOptions) {
++ target.Strict = pointer.BoolPtr(bool(b))
++}
++
++// Automatically default the decoded object.
++type DefaultAtDecode bool
++
++var _ DecodeOption = DefaultAtDecode(false)
++
++func (b DefaultAtDecode) ApplyToDecode(target *DecodeOptions) {
++ target.Default = pointer.BoolPtr(bool(b))
++}
++
++// Only applicable for Decoder.DecodeAll(). If the underlying data contains a v1.List,
++// the items of the list will be traversed, decoded into their respective types, and
++// appended to the returned slice. The v1.List will in this case not be returned.
++// This conversion does NOT support preserving comments. If the given scheme doesn't
++// recognize the v1.List, it will be registered automatically before use.
++type DecodeListElements bool
++
++var _ DecodeOption = DecodeListElements(false)
++
++func (b DecodeListElements) ApplyToDecode(target *DecodeOptions) {
++ target.DecodeListElements = pointer.BoolPtr(bool(b))
++}
++
++// DecodeUnknown specifies whether to decode objects with an unknown GroupVersionKind into a
++// *runtime.Unknown object when running Decode(All) (true value), or to return an error when
++// any unrecognized type is found (false value).
++type DecodeUnknown bool
++
++var _ DecodeOption = DecodeUnknown(false)
++
++func (b DecodeUnknown) ApplyToDecode(target *DecodeOptions) {
++ target.DecodeUnknown = pointer.BoolPtr(bool(b))
++}
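++
++// Illustrative sketch, not part of this change: how the option types compose.
++// Defaults are applied first, then each option mutates the target in order,
++// so later options win over earlier ones.
++//
++//	opts := defaultDecodeOpts().ApplyOptions([]DecodeOption{
++//		DecodeStrict(false),
++//		PreserveCommentsStrict,
++//	})
++//	// *opts.Strict is now false; opts.PreserveComments is PreserveCommentsStrict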
+diff --git a/pkg/serializer/patch.go b/pkg/serializer/patch.go
+new file mode 100644
+index 0000000..bd580e0
+--- /dev/null
++++ b/pkg/serializer/patch.go
+@@ -0,0 +1,124 @@
++package serializer
++
++import (
++ "bytes"
++ "encoding/json"
++ "errors"
++
++ "github.com/weaveworks/libgitops/pkg/util/patch"
++ "k8s.io/apimachinery/pkg/runtime"
++ "k8s.io/apimachinery/pkg/util/strategicpatch"
++ openapi "k8s.io/kube-openapi/pkg/util/proto"
++)
++
++// TODO: Move pkg/util/patch under pkg/serializer?
++
++type Patcher interface {
++ // ApplyOnStruct applies the given patch (JSON-encoded) using the given BytePatcher
++ // (that knows how to operate on that kind of patch type) into obj.
++ //
++ // obj MUST be a typed object. Unversioned, partial or unstructured objects are not
++ // supported. For those use-cases, convert your object into an unstructured one, and
++ // pass it to ApplyOnUnstructured.
++ //
++ // obj MUST NOT be an internal type. If you operate on an internal object as your "hub",
++ // convert the object yourself first to the GroupVersion of the patch bytes, and then
++ // convert back after this call.
++ //
++ // In case the patch would require knowledge about the schema (e.g. StrategicMergePatch),
++ // this function looks that metadata up using reflection of obj.
++ ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Object) error
++
++ // ApplyOnUnstructured applies the given patch (JSON-encoded) using the given BytePatcher
++ // (that knows how to operate on that kind of patch type) into the unstructured obj.
++ //
++ // If knowledge about the schema is required by the patch type (e.g. StrategicMergePatch),
++// it is the responsibility of the caller to provide an OpenAPI schema.
++ ApplyOnUnstructured(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Unstructured, schema openapi.Schema) error
++}
++
++type patcher struct {
++ *schemeAndCodec
++}
++
++// ApplyOnStruct applies the given patch (JSON-encoded) using the given BytePatcher
++// (that knows how to operate on that kind of patch type) into obj.
++//
++// obj MUST be a typed object. Unversioned, partial or unstructured objects are not
++// supported. For those use-cases, convert your object into an unstructured one, and
++// pass it to ApplyOnUnstructured.
++//
++// obj MUST NOT be an internal type. If you operate on an internal object as your "hub",
++// convert the object yourself first to the GroupVersion of the patch bytes, and then
++// convert back after this call.
++//
++// In case the patch would require knowledge about the schema (e.g. StrategicMergePatch),
++// this function looks that metadata up using reflection of obj.
++func (p *patcher) ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Object) error {
++ // Require that obj is typed
++ if !IsTyped(obj, p.scheme) {
++ return errors.New("obj must be typed")
++ }
++ // Get the GVK so we can check if obj is internal
++ gvk, err := GVKForObject(p.scheme, obj)
++ if err != nil {
++ return err
++ }
++ // It must not be internal, as we will encode it soon.
++ if gvk.Version == runtime.APIVersionInternal {
++ return errors.New("obj must not be internal")
++ }
++
++ // Create a non-pretty encoder
++ encopt := *defaultEncodeOpts().ApplyOptions([]EncodeOption{PrettyEncode(false)})
++ enc := newEncoder(p.schemeAndCodec, encopt)
++ // Encode without conversion to the buffer
++ var buf bytes.Buffer
++ if err := enc.EncodeForGroupVersion(NewJSONFrameWriter(&buf), obj, gvk.GroupVersion()); err != nil {
++ return err
++ }
++
++ // Get the schema in case needed by the BytePatcher
++ schema, err := strategicpatch.NewPatchMetaFromStruct(obj)
++ if err != nil {
++ return err
++ }
++
++ // Apply the patch, and get the new JSON out
++ newJSON, err := bytePatcher.Apply(buf.Bytes(), patch, schema)
++ if err != nil {
++ return err
++ }
++
++ // Decode into the object to apply the changes
++ fr := NewSingleFrameReader(newJSON, ContentTypeJSON)
++ dec := newDecoder(p.schemeAndCodec, *defaultDecodeOpts())
++ if err := dec.DecodeInto(fr, obj); err != nil {
++ return err
++ }
++
++ return nil
++}
++
++func (p *patcher) ApplyOnUnstructured(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Unstructured, schema openapi.Schema) error {
++ // Marshal the object to form the source JSON
++ sourceJSON, err := json.Marshal(obj)
++ if err != nil {
++ return err
++ }
++
++ // Conditionally get the schema from the provided OpenAPI spec
++ var patchMeta strategicpatch.LookupPatchMeta
++ if schema != nil {
++ patchMeta = strategicpatch.NewPatchMetaFromOpenAPI(schema)
++ }
++
++ // Apply the patch, and get the new JSON out
++ newJSON, err := bytePatcher.Apply(sourceJSON, patch, patchMeta)
++ if err != nil {
++ return err
++ }
++
++ // Decode back into obj
++ return json.Unmarshal(newJSON, obj)
++}
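++
++// Illustrative sketch, not part of this change: applying a JSON merge patch to
++// a typed, versioned object. mergePatcher stands in for a patch.BytePatcher
++// implementation from pkg/util/patch (its constructor is assumed here), s is
++// an assumed Serializer, and obj is an assumed external-version object.
++//
++//	patchJSON := []byte(`{"metadata":{"labels":{"patched":"true"}}}`)
++//	err := s.Patcher().ApplyOnStruct(mergePatcher, patchJSON, obj)
++//	// obj now carries the patched label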
+diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go
+index eb798c9..fbbcdd1 100644
+--- a/pkg/serializer/serializer.go
++++ b/pkg/serializer/serializer.go
+@@ -3,6 +3,7 @@ package serializer
+ import (
+ "errors"
+ "fmt"
++ "sync"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+@@ -22,8 +23,12 @@ const (
+ ContentTypeYAML = ContentType(runtime.ContentTypeYAML)
+ )
+
+-// ErrUnsupportedContentType is returned if the specified content type isn't supported
+-var ErrUnsupportedContentType = errors.New("unsupported content type")
++var (
++ // ErrUnsupportedContentType is returned if the specified content type isn't supported
++ ErrUnsupportedContentType = errors.New("unsupported content type")
++ // ErrObjectIsNotList is returned when a runtime.Object was not a List type
++ ErrObjectIsNotList = errors.New("given runtime.Object is not a *List type, or does not implement metav1.ListInterface")
++)
+
+ // ContentTyped is an interface for objects that are specific to a set ContentType.
+ type ContentTyped interface {
+@@ -31,6 +36,8 @@ type ContentTyped interface {
+ ContentType() ContentType
+ }
+
++func (ct ContentType) ContentType() ContentType { return ct }
++
+ // Serializer is an interface providing high-level decoding/encoding functionality
+ // for types registered in a *runtime.Scheme
+ type Serializer interface {
+@@ -38,13 +45,13 @@ type Serializer interface {
+ // a FrameWriter. The decoder can be customized by passing some options (e.g. DecodeStrict)
+ // to this call.
+ // The decoder supports both "classic" API Machinery objects and controller-runtime CRDs
+- Decoder(optsFn ...DecodingOptionsFunc) Decoder
++ Decoder(optsFn ...DecodeOption) Decoder
+
+ // Encoder is a high-level interface for encoding Kubernetes API Machinery objects and writing them
+ // to a FrameWriter. The encoder can be customized by passing some options (e.g. JSONIndent)
+ // to this call.
+ // The encoder supports both "classic" API Machinery objects and controller-runtime CRDs
+- Encoder(optsFn ...EncodingOptionsFunc) Encoder
++ Encoder(optsFn ...EncodeOption) Encoder
+
+ // Converter is a high-level interface for converting objects between different versions
+ // The converter supports both "classic" API Machinery objects and controller-runtime CRDs
+@@ -53,6 +60,8 @@ type Serializer interface {
+ // Defaulter is a high-level interface for accessing defaulting functions in a scheme
+ Defaulter() Defaulter
+
++ Patcher() Patcher
++
+ // Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to
+ // the "type universe" and advanced conversion/defaulting features
+ Scheme() *runtime.Scheme
+@@ -63,8 +72,10 @@ type Serializer interface {
+ }
+
+ type schemeAndCodec struct {
+- scheme *runtime.Scheme
+- codecs *k8sserializer.CodecFactory
++ // scheme is not thread-safe, hence it is guarded by a mutex
++ scheme *runtime.Scheme
++ schemeMu *sync.Mutex
++ codecs *k8sserializer.CodecFactory
+ }
+
+ // Encoder is a high-level interface for encoding Kubernetes API Machinery objects and writing them
+@@ -186,13 +197,16 @@ func NewSerializer(scheme *runtime.Scheme, codecs *k8sserializer.CodecFactory) S
+ *codecs = k8sserializer.NewCodecFactory(scheme)
+ }
+
++ schemeCodec := &schemeAndCodec{
++ scheme: scheme,
++ schemeMu: &sync.Mutex{},
++ codecs: codecs,
++ }
+ return &serializer{
+- schemeAndCodec: &schemeAndCodec{
+- scheme: scheme,
+- codecs: codecs,
+- },
+- converter: newConverter(scheme),
+- defaulter: newDefaulter(scheme),
++ schemeAndCodec: schemeCodec,
++ converter: newConverter(scheme),
++ defaulter: newDefaulter(scheme),
++ patcher: &patcher{schemeCodec},
+ }
+ }
+
+@@ -201,6 +215,7 @@ type serializer struct {
+ *schemeAndCodec
+ converter *converter
+ defaulter *defaulter
++ patcher *patcher
+ }
+
+ // Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to
+@@ -215,14 +230,12 @@ func (s *serializer) Codecs() *k8sserializer.CodecFactory {
+ return s.codecs
+ }
+
+-func (s *serializer) Decoder(optFns ...DecodingOptionsFunc) Decoder {
+- opts := newDecodeOpts(optFns...)
+- return newDecoder(s.schemeAndCodec, *opts)
++func (s *serializer) Decoder(opts ...DecodeOption) Decoder {
++ return newDecoder(s.schemeAndCodec, *defaultDecodeOpts().ApplyOptions(opts))
+ }
+
+-func (s *serializer) Encoder(optFns ...EncodingOptionsFunc) Encoder {
+- opts := newEncodeOpts(optFns...)
+- return newEncoder(s.schemeAndCodec, *opts)
++func (s *serializer) Encoder(opts ...EncodeOption) Encoder {
++ return newEncoder(s.schemeAndCodec, *defaultEncodeOpts().ApplyOptions(opts))
+ }
+
+ func (s *serializer) Converter() Converter {
+@@ -233,6 +246,10 @@ func (s *serializer) Defaulter() Defaulter {
+ return s.defaulter
+ }
+
++func (s *serializer) Patcher() Patcher {
++ return s.patcher
++}
++
+ func prioritizedVersionForGroup(scheme *runtime.Scheme, groupName string) (schema.GroupVersion, error) {
+ // Get the prioritized versions for the given group
+ gvs := scheme.PrioritizedVersionsForGroup(groupName)
+@@ -242,23 +259,3 @@ func prioritizedVersionForGroup(scheme *runtime.Scheme, groupName string) (schem
+ // Use the first, preferred, (external) version
+ return gvs[0], nil
+ }
+-
+-func GVKForObject(scheme *runtime.Scheme, obj runtime.Object) (schema.GroupVersionKind, error) {
+- // If we already have TypeMeta filled in here, just use it
+- // TODO: This is probably not needed
+- gvk := obj.GetObjectKind().GroupVersionKind()
+- if !gvk.Empty() {
+- return gvk, nil
+- }
+-
+- // TODO: If there are two GVKs returned, it's probably a misconfiguration in the scheme
+- // It might be expected though, and we can tolerate setting the GVK manually IFF there are more than
+- // one ObjectKind AND the given GVK is one of them.
+-
+- // Get the possible kinds for the object
+- gvks, unversioned, err := scheme.ObjectKinds(obj)
+- if unversioned || err != nil || len(gvks) != 1 {
+- return schema.GroupVersionKind{}, fmt.Errorf("unversioned %t or err %v or invalid gvks %v", unversioned, err, gvks)
+- }
+- return gvks[0], nil
+-}
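++
++// Illustrative sketch, not part of this change: wiring a Serializer together
++// and reaching the new Patcher accessor. scheme and codecs are assumed to be
++// constructed as in the package tests.
++//
++//	s := NewSerializer(scheme, &codecs)
++//	dec := s.Decoder(DecodeUnknown(true))
++//	p := s.Patcher()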
+diff --git a/pkg/serializer/serializer_test.go b/pkg/serializer/serializer_test.go
+index ba23985..c475ec7 100644
+--- a/pkg/serializer/serializer_test.go
++++ b/pkg/serializer/serializer_test.go
+@@ -21,8 +21,8 @@ var (
+ codecs = k8sserializer.NewCodecFactory(scheme)
+ ourserializer = NewSerializer(scheme, &codecs)
+ defaultEncoder = ourserializer.Encoder(
+- WithPrettyEncode(false), // TODO: Also test the pretty serializer
+- WithCommentsEncode(true),
++ PrettyEncode(false), // TODO: Also test the pretty serializer
++ PreserveCommentsStrict,
+ )
+
+ groupname = "foogroup"
+@@ -402,8 +402,8 @@ func TestDecode(t *testing.T) {
+ for _, rt := range tests {
+ t.Run(rt.name, func(t2 *testing.T) {
+ obj, actual := ourserializer.Decoder(
+- WithDefaultsDecode(rt.doDefaulting),
+- WithConvertToHubDecode(rt.doConversion),
++ DefaultAtDecode(rt.doDefaulting),
++ ConvertToHub(rt.doConversion),
+ ).Decode(NewYAMLFrameReader(FromBytes(rt.data)))
+ if (actual != nil) != rt.expectedErr {
+ t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual)
+@@ -444,7 +444,7 @@ func TestDecodeInto(t *testing.T) {
+ t.Run(rt.name, func(t2 *testing.T) {
+
+ actual := ourserializer.Decoder(
+- WithDefaultsDecode(rt.doDefaulting),
++ DefaultAtDecode(rt.doDefaulting),
+ ).DecodeInto(NewYAMLFrameReader(FromBytes(rt.data)), rt.obj)
+ if (actual != nil) != rt.expectedErr {
+ t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual)
+@@ -484,8 +484,8 @@ func TestDecodeAll(t *testing.T) {
+ for _, rt := range tests {
+ t.Run(rt.name, func(t2 *testing.T) {
+ objs, actual := ourserializer.Decoder(
+- WithDefaultsDecode(rt.doDefaulting),
+- WithListElementsDecoding(rt.listSplit),
++ DefaultAtDecode(rt.doDefaulting),
++ DecodeListElements(rt.listSplit),
+ ).DecodeAll(NewYAMLFrameReader(FromBytes(rt.data)))
+ if (actual != nil) != rt.expectedErr {
+ t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual)
+@@ -527,7 +527,7 @@ func TestDecodeUnknown(t *testing.T) {
+ for _, rt := range tests {
+ t.Run(rt.name, func(t2 *testing.T) {
+ obj, actual := ourserializer.Decoder(
+- WithUnknownDecode(rt.unknown),
++ DecodeUnknown(rt.unknown),
+ ).Decode(NewYAMLFrameReader(FromBytes(rt.data)))
+ if (actual != nil) != rt.expectedErr {
+ t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual)
+@@ -560,9 +560,9 @@ func TestRoundtrip(t *testing.T) {
+ for _, rt := range tests {
+ t.Run(rt.name, func(t2 *testing.T) {
+ obj, err := ourserializer.Decoder(
+- WithConvertToHubDecode(true),
+- WithCommentsDecode(true),
+- WithUnknownDecode(true),
++ ConvertToHub(true),
++ PreserveCommentsStrict,
++ DecodeUnknown(true),
+ ).Decode(NewYAMLFrameReader(FromBytes(rt.data)))
+ if err != nil {
+ t2.Errorf("unexpected decode error: %v", err)
+diff --git a/pkg/serializer/utils.go b/pkg/serializer/utils.go
+new file mode 100644
+index 0000000..f916a7a
+--- /dev/null
++++ b/pkg/serializer/utils.go
+@@ -0,0 +1,121 @@
++package serializer
++
++import (
++ "fmt"
++ "strings"
++
++ "k8s.io/apimachinery/pkg/api/meta"
++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
++ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
++ "k8s.io/apimachinery/pkg/runtime"
++ "k8s.io/apimachinery/pkg/runtime/schema"
++ "sigs.k8s.io/controller-runtime/pkg/client"
++ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
++)
++
++func GVKForObject(scheme *runtime.Scheme, obj runtime.Object) (schema.GroupVersionKind, error) {
++ // Safety check: one should not do this
++ if obj == nil || obj.GetObjectKind() == nil {
++ return schema.GroupVersionKind{}, fmt.Errorf("GVKForObject: obj or obj.GetObjectKind() must not be nil")
++ }
++
++ // If this is a runtime.Unknown object, return the GVK stored in TypeMeta
++ if gvk := obj.GetObjectKind().GroupVersionKind(); IsUnknown(obj) && !gvk.Empty() {
++ return gvk, nil
++ }
++
++ // Special case: Allow objects with two versions to be registered, when the caller is specific
++ // about what version they want populated.
++ // This is needed essentially for working around that there are specific K8s types (structs)
++ // that have been registered with multiple GVKs (e.g. a Deployment struct in both apps & extensions)
++ // TODO: Maybe there is a better way to solve this? Remove unwanted entries from the scheme typeToGVK
++ // map manually?
++ gvks, _, _ := scheme.ObjectKinds(obj)
++ if len(gvks) > 1 {
++ // If we have a configuration with more than one gvk for the same object,
++ // check the set GVK on the object to "choose" the right one, if exists in the list
++ setGVK := obj.GetObjectKind().GroupVersionKind()
++ if !setGVK.Empty() {
++ for _, gvk := range gvks {
++ if EqualsGVK(setGVK, gvk) {
++ return gvk, nil
++ }
++ }
++ }
++ }
++
++ // TODO: Should we just copy-paste this one, or move it into k8s core to avoid importing controller-runtime
++ // only for this function?
++ return apiutil.GVKForObject(obj, scheme)
++}
++
++// GVKForList returns the GroupVersionKind for the items in a given List type.
++// In the case of Unstructured or PartialObjectMetadata, it is required that this
++// information is already set in TypeMeta. The "List" suffix is never returned.
++func GVKForList(obj client.ObjectList, scheme *runtime.Scheme) (schema.GroupVersionKind, error) {
++ // First, get the GVK as normal.
++ gvk, err := GVKForObject(scheme, obj)
++ if err != nil {
++ return schema.GroupVersionKind{}, err
++ }
++ // Make sure this is a list type, i.e. it has an "Items" field.
++ isList := meta.IsListType(obj)
++ if !isList {
++ return schema.GroupVersionKind{}, ErrObjectIsNotList
++ }
++ // Make sure the returned GVK never ends in List.
++ gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
++ return gvk, nil
++}
++
++// EqualsGK returns true if gk1 and gk2 have the same fields.
++func EqualsGK(gk1, gk2 schema.GroupKind) bool {
++ return gk1.Group == gk2.Group && gk1.Kind == gk2.Kind
++}
++
++// EqualsGVK returns true if gvk1 and gvk2 have the same fields.
++func EqualsGVK(gvk1, gvk2 schema.GroupVersionKind) bool {
++ return EqualsGK(gvk1.GroupKind(), gvk2.GroupKind()) && gvk1.Version == gvk2.Version
++}
++
++func IsUnknown(obj runtime.Object) bool {
++ _, isUnknown := obj.(*runtime.Unknown)
++ return isUnknown
++}
++
++func IsPartialObject(obj runtime.Object) bool {
++ _, isPartial := obj.(*metav1.PartialObjectMetadata)
++ return isPartial
++}
++
++func IsPartialObjectList(obj runtime.Object) bool {
++ _, isPartialList := obj.(*metav1.PartialObjectMetadataList)
++ return isPartialList
++}
++
++// IsUnstructured checks if obj is runtime.Unstructured
++func IsUnstructured(obj runtime.Object) bool {
++ _, isUnstructured := obj.(runtime.Unstructured)
++ return isUnstructured
++}
++
++// IsUnstructuredList checks if obj is *unstructured.UnstructuredList
++func IsUnstructuredList(obj runtime.Object) bool {
++ _, isUnstructuredList := obj.(*unstructured.UnstructuredList)
++ return isUnstructuredList
++}
++
++// IsNonConvertible returns true for unstructured, partial and unknown objects
++// that should not be converted.
++func IsNonConvertible(obj runtime.Object) bool {
++ // TODO: Should Lists also be marked non-convertible?
++ // IsUnstructured also covers IsUnstructuredList -- *UnstructuredList implements runtime.Unstructured
++ return IsUnstructured(obj) || IsPartialObject(obj) || IsPartialObjectList(obj) || IsUnknown(obj)
++}
++
++// IsTyped returns true if the object is typed, i.e. registered with the given
++// scheme and not unversioned.
++func IsTyped(obj runtime.Object, scheme *runtime.Scheme) bool {
++ _, isUnversioned, err := scheme.ObjectKinds(obj)
++ return !isUnversioned && err == nil
++}
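++
++// Illustrative sketch, not part of this change: GVKForList always trims the
++// "List" suffix, so an UnstructuredList with the hypothetical kind "WidgetList"
++// resolves to kind "Widget". scheme is assumed to be in scope.
++//
++//	list := &unstructured.UnstructuredList{}
++//	list.SetGroupVersionKind(schema.GroupVersionKind{
++//		Group: "example.com", Version: "v1", Kind: "WidgetList",
++//	})
++//	gvk, err := GVKForList(list, scheme)
++//	// gvk.Kind == "Widget", err == nil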
+diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go
+new file mode 100644
+index 0000000..39d769e
+--- /dev/null
++++ b/pkg/storage/backend/backend.go
+@@ -0,0 +1,332 @@
++package backend
++
++import (
++ "bytes"
++ "context"
++ "errors"
++ "fmt"
++
++ "github.com/weaveworks/libgitops/pkg/serializer"
++ "github.com/weaveworks/libgitops/pkg/storage"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
++ "k8s.io/apimachinery/pkg/runtime"
++ "k8s.io/apimachinery/pkg/util/sets"
++)
++
++var (
++ // ErrCannotSaveMetadata is returned if the user tries to save metadata-only objects
++ ErrCannotSaveMetadata = errors.New("cannot save (Create|Update|Patch) *metav1.PartialObjectMetadata")
++ // ErrNameRequired is returned when .metadata.name is unset
++ // TODO: Support generateName?
++ ErrNameRequired = errors.New(".metadata.name is required")
++)
++
++// TODO: Make a *core.Unknown that has
++// 1. TypeMeta
++// 2. DeepCopies (for Object compatibility),
++// 3. ObjectMeta
++// 4. Spec { Data []byte, ContentType ContentType, Object interface{} }
++// 5. Status { Data []byte, ContentType ContentType, Object interface{} }
++// TODO: Need to make sure we never write this internal struct to disk (MarshalJSON error?)
++
++type Accessors interface {
++ Storage() storage.Storage
++ NamespaceEnforcer() NamespaceEnforcer
++ Scheme() *runtime.Scheme
++}
++
++type WriteAccessors interface {
++ Validator() Validator
++ StorageVersioner() StorageVersioner
++}
++
++type Reader interface {
++ Accessors
++
++ Get(ctx context.Context, obj core.Object) error
++ storage.Lister
++}
++
++type Writer interface {
++ Accessors
++ WriteAccessors
++
++ Create(ctx context.Context, obj core.Object) error
++ Update(ctx context.Context, obj core.Object) error
++ Delete(ctx context.Context, obj core.Object) error
++}
++
++type StatusWriter interface {
++ Accessors
++ WriteAccessors
++
++ UpdateStatus(ctx context.Context, obj core.Object) error
++}
++
++type Backend interface {
++ Reader
++ Writer
++ StatusWriter
++}
++
++type ChangeOperation string
++
++const (
++ ChangeOperationCreate ChangeOperation = "create"
++ ChangeOperationUpdate ChangeOperation = "update"
++ ChangeOperationDelete ChangeOperation = "delete"
++)
++
++type Validator interface {
++ ValidateChange(ctx context.Context, backend Reader, op ChangeOperation, obj core.Object) error
++}
++
++type StorageVersioner interface {
++ // TODO: Do we need the context here?
++ StorageVersion(ctx context.Context, id core.ObjectID) (core.GroupVersion, error)
++}
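++
++// A minimal, illustrative StorageVersioner sketch (fixedVersioner is a
++// hypothetical type): it answers with one fixed GroupVersion for every ID.
++//
++//	type fixedVersioner struct{ gv core.GroupVersion }
++//
++//	func (v fixedVersioner) StorageVersion(_ context.Context, _ core.ObjectID) (core.GroupVersion, error) {
++//		return v.gv, nil
++//	}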
++
++func NewGeneric(
++ storage storage.Storage,
++ serializer serializer.Serializer, // TODO: only scheme required, encode/decode optional?
++ enforcer NamespaceEnforcer,
++ validator Validator, // TODO: optional?
++ versioner StorageVersioner, // TODO: optional?
++) (*Generic, error) {
++ if storage == nil {
++ return nil, fmt.Errorf("storage is mandatory")
++ }
++ if serializer == nil { // TODO: relax this to scheme, and add encoder/decoder to opts?
++ return nil, fmt.Errorf("serializer is mandatory")
++ }
++ if enforcer == nil {
++ return nil, fmt.Errorf("enforcer is mandatory")
++ }
++ // TODO: validate options
++ return &Generic{
++ scheme: serializer.Scheme(),
++ encoder: serializer.Encoder(),
++ decoder: serializer.Decoder(),
++
++ storage: storage,
++ enforcer: enforcer,
++ validator: validator,
++ versioner: versioner,
++ }, nil
++}
++
++var _ Backend = &Generic{}
++
++type Generic struct {
++ scheme *runtime.Scheme
++ decoder serializer.Decoder
++ encoder serializer.Encoder
++
++ storage storage.Storage
++ enforcer NamespaceEnforcer
++ validator Validator
++ versioner StorageVersioner
++}
++
++func (b *Generic) Scheme() *runtime.Scheme {
++ return b.scheme
++}
++
++func (b *Generic) Storage() storage.Storage {
++ return b.storage
++}
++
++func (b *Generic) NamespaceEnforcer() NamespaceEnforcer {
++ return b.enforcer
++}
++
++func (b *Generic) Validator() Validator {
++ return b.validator
++}
++
++func (b *Generic) StorageVersioner() StorageVersioner {
++ return b.versioner
++}
++
++func (b *Generic) Get(ctx context.Context, obj core.Object) error {
++ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info.
++ id, err := b.idForObj(ctx, obj)
++ if err != nil {
++ return err
++ }
++ // Read the underlying bytes
++ content, err := b.storage.Read(ctx, id)
++ if err != nil {
++ return err
++ }
++ // Get the right content type for the data
++ ct, err := b.storage.ContentType(ctx, id)
++ if err != nil {
++ return err
++ }
++
++ // TODO: Support various decoding options, e.g. defaulting?
++ // TODO: Does this "replace" already-set fields?
++ return b.decoder.DecodeInto(serializer.NewSingleFrameReader(content, ct), obj)
++}
++
++// ListNamespaces lists the available namespaces for the given GroupKind.
++// This function shall only be called for namespaced objects; it is up to
++// the caller to make sure this method is not called for root-spaced
++// objects, for which the behavior is undefined (though returning an error
++// is recommended).
++func (b *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) {
++ return b.storage.ListNamespaces(ctx, gk)
++}
++
++// ListObjectIDs returns a list of unversioned object IDs. For namespaced
++// GroupKinds, the caller must provide a namespace, and for root-spaced
++// GroupKinds, the caller must not. When namespaced, this function
++// must only return object IDs for that given namespace.
++func (b *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) {
++ return b.storage.ListObjectIDs(ctx, gk, namespace)
++}
++
++func (b *Generic) Create(ctx context.Context, obj core.Object) error {
++ // We must never save metadata-only structs
++ if serializer.IsPartialObject(obj) {
++ return ErrCannotSaveMetadata
++ }
++
++ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info.
++ id, err := b.idForObj(ctx, obj)
++ if err != nil {
++ return err
++ }
++
++ // Do not create it if it already exists
++ if b.storage.Exists(ctx, id) {
++ return core.NewErrAlreadyExists(id)
++ }
++
++ // Validate that the change is ok
++ // TODO: Don't make "upcasting" possible here
++ if b.validator != nil {
++ if err := b.validator.ValidateChange(ctx, b, ChangeOperationCreate, obj); err != nil {
++ return err
++ }
++ }
++
++ // Internal, common write shared with Update()
++ return b.write(ctx, id, obj)
++}
++
++func (b *Generic) Update(ctx context.Context, obj core.Object) error {
++ // We must never save metadata-only structs
++ if serializer.IsPartialObject(obj) {
++ return ErrCannotSaveMetadata
++ }
++
++ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info.
++ id, err := b.idForObj(ctx, obj)
++ if err != nil {
++ return err
++ }
++
++ // Require that the object already exists
++ if !b.storage.Exists(ctx, id) {
++ return core.NewErrNotFound(id)
++ }
++
++ // Validate that the change is ok
++ // TODO: Don't make "upcasting" possible here
++ if b.validator != nil {
++ if err := b.validator.ValidateChange(ctx, b, ChangeOperationUpdate, obj); err != nil {
++ return err
++ }
++ }
++
++ // Internal, common write shared with Create()
++ return b.write(ctx, id, obj)
++}
++
++func (b *Generic) UpdateStatus(ctx context.Context, obj core.Object) error {
++ return core.ErrNotImplemented // TODO
++}
++
++func (b *Generic) write(ctx context.Context, id core.ObjectID, obj core.Object) error {
++ // TODO: Figure out how to get ContentType before the object actually exists!
++ ct, err := b.storage.ContentType(ctx, id)
++ if err != nil {
++ return err
++ }
++ // Resolve the desired storage version
++ /* TODO: re-enable later
++ gv, err := b.versioner.StorageVersion(ctx, id)
++ if err != nil {
++ return err
++ }*/
++
++ // Set creationTimestamp if not already populated
++ t := obj.GetCreationTimestamp()
++ if t.IsZero() {
++ obj.SetCreationTimestamp(metav1.Now())
++ }
++
++ var objBytes bytes.Buffer
++ // TODO: Work with any ContentType, not just JSON/YAML. Or, make a SingleFrameWriter for any ct.
++ err = b.encoder.Encode(serializer.NewFrameWriter(ct, &objBytes), obj)
++ if err != nil {
++ return err
++ }
++
++ return b.storage.Write(ctx, id, objBytes.Bytes())
++}
++
++func (b *Generic) Delete(ctx context.Context, obj core.Object) error {
++ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info.
++ id, err := b.idForObj(ctx, obj)
++ if err != nil {
++ return err
++ }
++
++ // Verify it did exist
++ if !b.storage.Exists(ctx, id) {
++ return core.NewErrNotFound(id)
++ }
++
++ // Validate that the change is ok
++ // TODO: Don't make "upcasting" possible here
++ if b.validator != nil {
++ if err := b.validator.ValidateChange(ctx, b, ChangeOperationDelete, obj); err != nil {
++ return err
++ }
++ }
++
++ // Delete it from the underlying storage
++ return b.storage.Delete(ctx, id)
++}
++
++// Note: This should also work for unstructured and partial metadata objects
++func (b *Generic) idForObj(ctx context.Context, obj core.Object) (core.ObjectID, error) {
++ gvk, err := serializer.GVKForObject(b.scheme, obj)
++ if err != nil {
++ return nil, err
++ }
++
++ // Object must always have .metadata.name set
++ if len(obj.GetName()) == 0 {
++ return nil, ErrNameRequired
++ }
++
++ // Enforce the given namespace policy. This might mutate obj.
++ // TODO: disallow "upcasting" the Lister to a full-blown Storage?
++ if err := b.enforcer.EnforceNamespace(
++ ctx,
++ obj,
++ gvk,
++ b.Storage().Namespacer(),
++ b.Storage(),
++ ); err != nil {
++ return nil, err
++ }
++
++ // At this point we know name is non-empty, and the namespace field is correct,
++ // according to policy
++ return core.NewObjectID(gvk, core.ObjectKeyFromObject(obj)), nil
++}
+diff --git a/pkg/storage/backend/enforcer.go b/pkg/storage/backend/enforcer.go
+new file mode 100644
+index 0000000..8553283
+--- /dev/null
++++ b/pkg/storage/backend/enforcer.go
+@@ -0,0 +1,116 @@
++package backend
++
++import (
++ "context"
++ "errors"
++ "fmt"
++
++ "github.com/weaveworks/libgitops/pkg/storage"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++)
++
++var (
++ // ErrNoSuchNamespace means that the set of namespaces was searched in the
++ // system, but the requested namespace wasn't in that list.
++ ErrNoSuchNamespace = errors.New("no such namespace in the system")
++)
++
++// NamespaceEnforcer enforces a namespace policy for the Backend.
++type NamespaceEnforcer interface {
++ // EnforceNamespace makes sure that:
++ // a) Any namespaced object has a non-empty namespace field after this call
++ // b) Any non-namespaced object has an empty namespace field after this call
++ // c) The applicable namespace policy of the user's liking is enforced (e.g.
++ // that there are only certain valid namespaces that can be used).
++ //
++ // This call is allowed to mutate obj. gvk represents the GroupVersionKind
++ // of obj. The namespacer can be used to figure out if the given object is
++ // namespaced or not. The given lister might be used to list object IDs,
++ // or existing namespaces in the system.
++ //
++ // See GenericNamespaceEnforcer for an example implementation, or
++ // pkg/storage/kube.NewNamespaceEnforcer() for a sample application.
++ EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer core.Namespacer, lister storage.Lister) error
++}
++
++// GenericNamespaceEnforcer is a NamespaceEnforcer that:
++// a) sets a default namespace for namespaced objects that have
++// the namespace field left empty
++// b) makes sure non-namespaced objects do not have the namespace
++// field set, by pruning any previously-set value.
++// c) if NamespaceGroupKind is non-nil; lists valid Namespace objects
++// in the system (of the given GroupKind); and matches namespaced
++// objects' namespace field against the listed Namespace objects'
++// .metadata.name field.
++//
++// For an example of how to configure this enforcer in the way
++// Kubernetes itself (approximately) does, see pkg/storage/kube.
++// NewNamespaceEnforcer().
++type GenericNamespaceEnforcer struct {
++ // DefaultNamespace describes the default namespace string
++ // that should be set, if a namespaced object's namespace
++ // field is empty.
++ // +required
++ DefaultNamespace string
++ // NamespaceGroupKind describes the GroupKind for Namespace
++ // objects in the system. If non-nil, objects with such
++ // GroupKind are listed, and their .metadata.name is matched
++ // against the current object's namespace field. If nil, any
++ // namespace value is considered valid.
++ // +optional
++ NamespaceGroupKind *core.GroupKind
++}
++
++func (e GenericNamespaceEnforcer) EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer core.Namespacer, lister storage.Lister) error {
++ // Get namespacing info
++ namespaced, err := namespacer.IsNamespaced(gvk.GroupKind())
++ if err != nil {
++ return err
++ }
++
++ // Enforce generic rules
++ ns := obj.GetNamespace()
++ if !namespaced {
++ // If a namespace was set, it must be sanitized, as non-namespaced
++ // resources must have namespace field empty.
++ if len(ns) != 0 {
++ obj.SetNamespace("")
++ }
++ return nil
++ }
++ // The resource is namespaced.
++ // If it is empty, set it to the default namespace.
++ if len(ns) == 0 {
++ // Verify that DefaultNamespace is non-empty
++ if len(e.DefaultNamespace) == 0 {
++ return fmt.Errorf("GenericNamespaceEnforcer.DefaultNamespace is mandatory: %w", core.ErrInvalidParameter)
++ }
++ // Mutate obj and set the namespace field to the default, then return
++ obj.SetNamespace(e.DefaultNamespace)
++ return nil
++ }
++
++ // If the namespace field is set, but NamespaceGroupKind is
++ // nil, it means that any non-empty namespace value is
++ // valid.
++ if e.NamespaceGroupKind == nil {
++ return nil
++ }
++
++ // However, if a Namespace GroupKind was given, look it up using
++ // the lister, and verify its .metadata.name matches the given
++ // namespace value.
++ objIDs, err := lister.ListObjectIDs(ctx, *e.NamespaceGroupKind, "")
++ if err != nil {
++ return err
++ }
++ // Loop through the IDs, and try to match it against the set ns
++ for _, id := range objIDs {
++ if id.ObjectKey().Name == ns {
++ // Found the namespace; this is a valid setting
++ return nil
++ }
++ }
++ // The set namespace doesn't belong to the set of valid namespaces, error
++ return fmt.Errorf("%w: %q", ErrNoSuchNamespace, ns)
++}
+diff --git a/pkg/storage/cache/cache.go b/pkg/storage/cache/cache.go
+deleted file mode 100644
+index 11a4991..0000000
+--- a/pkg/storage/cache/cache.go
++++ /dev/null
+@@ -1,197 +0,0 @@
+-package cache
+-
+-/*
+-
+-TODO: Revisit if we need this file/package in the future.
+-
+-import (
+- log "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/serializer"
+- "github.com/weaveworks/libgitops/pkg/storage"
+- "k8s.io/apimachinery/pkg/runtime/schema"
+-)
+-
+-// Cache is an intermediate caching layer, which conforms to Storage
+-// Typically you back the cache with an actual storage
+-type Cache interface {
+- storage.Storage
+- // Flush is used to write the state of the entire cache to storage
+- // Warning: this is a very expensive operation
+- Flush() error
+-}
+-
+-type cache struct {
+- // storage is the backing Storage for the cache
+- // used to look up non-cached Objects
+- storage storage.Storage
+-
+- // index caches the Objects by GroupVersionKind and UID
+- // This guarantees uniqueness when looking up a specific Object
+- index *index
+-}
+-
+-var _ Cache = &cache{}
+-
+-func NewCache(backingStorage storage.Storage) Cache {
+- c := &cache{
+- storage: backingStorage,
+- index: newIndex(backingStorage),
+- }
+-
+- return c
+-}
+-
+-func (s *cache) Serializer() serializer.Serializer {
+- return s.storage.Serializer()
+-}
+-
+-func (c *cache) New(gvk schema.GroupVersionKind) (runtime.Object, error) {
+- // Request the storage to create the Object. The
+- // newly generated Object has not got an UID which
+- // is required for indexing, so just return it
+- // without storing it into the cache
+- return c.storage.New(gvk)
+-}
+-
+-func (c *cache) Get(gvk schema.GroupVersionKind, uid runtime.UID) (obj runtime.Object, err error) {
+- log.Tracef("cache: Get %s with UID %q", gvk.Kind, uid)
+-
+- // If the requested Object resides in the cache, return it
+- if obj, err = c.index.loadByID(gvk, uid); err != nil || obj != nil {
+- return
+- }
+-
+- // Request the Object from the storage
+- obj, err = c.storage.Get(gvk, uid)
+-
+- // If no errors occurred, cache it
+- if err == nil {
+- err = c.index.store(obj)
+- }
+-
+- return
+-}
+-
+-func (c *cache) GetMeta(gvk schema.GroupVersionKind, uid runtime.UID) (obj runtime.Object, err error) {
+- log.Tracef("cache: GetMeta %s with UID %q", gvk.Kind, uid)
+-
+- obj, err = c.storage.GetMeta(gvk, uid)
+-
+- // If no errors occurred while loading, store the Object in the cache
+- if err == nil {
+- err = c.index.storeMeta(obj)
+- }
+-
+- return
+-}
+-
+-func (c *cache) Set(gvk schema.GroupVersionKind, obj runtime.Object) error {
+- log.Tracef("cache: Set %s with UID %q", gvk.Kind, obj.GetUID())
+-
+- // Store the changed Object in the cache
+- if err := c.index.store(obj); err != nil {
+- return err
+- }
+-
+- // TODO: For now the cache always flushes, we might add automatic flushing later
+- return c.storage.Set(gvk, obj)
+-}
+-
+-func (c *cache) Patch(gvk schema.GroupVersionKind, uid runtime.UID, patch []byte) error {
+- // TODO: For now patches are always flushed, the cache will load the updated Object on-demand on access
+- return c.storage.Patch(gvk, uid, patch)
+-}
+-
+-func (c *cache) Delete(gvk schema.GroupVersionKind, uid runtime.UID) error {
+- log.Tracef("cache: Delete %s with UID %q", gvk.Kind, uid)
+-
+- // Delete the given Object from the cache and storage
+- c.index.delete(gvk, uid)
+- return c.storage.Delete(gvk, uid)
+-}
+-
+-type listFunc func(gvk schema.GroupVersionKind) ([]runtime.Object, error)
+-type cacheStoreFunc func([]runtime.Object) error
+-
+-// list is a common handler for List and ListMeta
+-func (c *cache) list(gvk schema.GroupVersionKind, slf, clf listFunc, csf cacheStoreFunc) (objs []runtime.Object, err error) {
+- var storageCount uint64
+- if storageCount, err = c.storage.Count(gvk); err != nil {
+- return
+- }
+-
+- if c.index.count(gvk) != storageCount {
+- log.Tracef("cache: miss when listing: %s", gvk)
+- // If the cache doesn't track all of the Objects, request them from the storage
+- if objs, err = slf(gvk); err != nil {
+- // If no errors occurred, store the Objects in the cache
+- err = csf(objs)
+- }
+- } else {
+- log.Tracef("cache: hit when listing: %s", gvk)
+- // If the cache tracks everything, return the cache's contents
+- objs, err = clf(gvk)
+- }
+-
+- return
+-}
+-
+-func (c *cache) List(gvk schema.GroupVersionKind) ([]runtime.Object, error) {
+- return c.list(gvk, c.storage.List, c.index.list, c.index.storeAll)
+-}
+-
+-func (c *cache) ListMeta(gvk schema.GroupVersionKind) ([]runtime.Object, error) {
+- return c.list(gvk, c.storage.ListMeta, c.index.listMeta, c.index.storeAllMeta)
+-}
+-
+-func (c *cache) Count(gvk schema.GroupVersionKind) (uint64, error) {
+- // The cache is transparent about how many items it has cached
+- return c.storage.Count(gvk)
+-}
+-
+-func (c *cache) Checksum(gvk schema.GroupVersionKind, uid runtime.UID) (string, error) {
+- // The cache is transparent about the checksums
+- return c.storage.Checksum(gvk, uid)
+-}
+-
+-func (c *cache) RawStorage() storage.RawStorage {
+- return c.storage.RawStorage()
+-}
+-
+-func (c *cache) Close() error {
+- return c.storage.Close()
+-}
+-
+-func (c *cache) Flush() error {
+- // Load the entire cache
+- allObjects, err := c.index.loadAll()
+- if err != nil {
+- return err
+- }
+-
+- for _, obj := range allObjects {
+- // Request the storage to save each Object
+- if err := c.storage.Set(obj); err != nil {
+- return err
+- }
+- }
+-
+- return nil
+-}
+-
+-// PartialObjectFrom is used to create a bound PartialObjectImpl from an Object.
+-// Note: This might be useful later (maybe here or maybe in pkg/runtime) if re-enable the cache
+-func PartialObjectFrom(obj Object) (PartialObject, error) {
+- tm, ok := obj.GetObjectKind().(*metav1.TypeMeta)
+- if !ok {
+- return nil, fmt.Errorf("PartialObjectFrom: Cannot cast obj to *metav1.TypeMeta, is %T", obj.GetObjectKind())
+- }
+- om, ok := obj.GetObjectMeta().(*metav1.ObjectMeta)
+- if !ok {
+- return nil, fmt.Errorf("PartialObjectFrom: Cannot cast obj to *metav1.ObjectMeta, is %T", obj.GetObjectMeta())
+- }
+- return &PartialObjectImpl{tm, om}, nil
+-}
+-
+-*/
+diff --git a/pkg/storage/cache/index.go b/pkg/storage/cache/index.go
+deleted file mode 100644
+index 326014f..0000000
+--- a/pkg/storage/cache/index.go
++++ /dev/null
+@@ -1,156 +0,0 @@
+-package cache
+-
+-/*
+-
+-TODO: Revisit if we need this file/package in the future.
+-
+-import (
+- log "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/storage"
+- "k8s.io/apimachinery/pkg/runtime/schema"
+-)
+-
+-type index struct {
+- storage storage.Storage
+- objects map[schema.GroupVersionKind]map[runtime.UID]*cacheObject
+-}
+-
+-func newIndex(storage storage.Storage) *index {
+- return &index{
+- storage: storage,
+- objects: make(map[schema.GroupVersionKind]map[runtime.UID]*cacheObject),
+- }
+-}
+-
+-func (i *index) loadByID(gvk schema.GroupVersionKind, uid runtime.UID) (runtime.Object, error) {
+- if uids, ok := i.objects[gvk]; ok {
+- if obj, ok := uids[uid]; ok {
+- log.Tracef("index: cache hit for %s with UID %q", gvk.Kind, uid)
+- return obj.loadFull()
+- }
+- }
+-
+- log.Tracef("index: cache miss for %s with UID %q", gvk.Kind, uid)
+- return nil, nil
+-}
+-
+-func (i *index) loadAll() ([]runtime.Object, error) {
+- var size uint64
+-
+- for gvk := range i.objects {
+- size += i.count(gvk)
+- }
+-
+- all := make([]runtime.Object, 0, size)
+-
+- for gvk := range i.objects {
+- if objects, err := i.list(gvk); err == nil {
+- all = append(all, objects...)
+- } else {
+- return nil, err
+- }
+- }
+-
+- return all, nil
+-}
+-
+-func store(i *index, obj runtime.Object, apiType bool) error {
+- // If store is called for an invalid Object lacking an UID,
+- // panic and print the stack trace. This should never happen.
+- if obj.GetUID() == "" {
+- panic("Attempt to cache invalid Object: missing UID")
+- }
+-
+- co, err := newCacheObject(i.storage, obj, apiType)
+- if err != nil {
+- return err
+- }
+-
+- gvk := co.object.GetObjectKind().GroupVersionKind()
+-
+- if _, ok := i.objects[gvk]; !ok {
+- i.objects[gvk] = make(map[runtime.UID]*cacheObject)
+- }
+-
+- log.Tracef("index: storing %s object with UID %q, meta: %t", gvk.Kind, obj.GetName(), apiType)
+- i.objects[gvk][co.object.GetUID()] = co
+-
+- return nil
+-}
+-
+-func (i *index) store(obj runtime.Object) error {
+- return store(i, obj, false)
+-}
+-
+-func (i *index) storeAll(objs []runtime.Object) (err error) {
+- for _, obj := range objs {
+- if err = i.store(obj); err != nil {
+- break
+- }
+- }
+-
+- return
+-}
+-
+-func (i *index) storeMeta(obj runtime.Object) error {
+- return store(i, obj, true)
+-}
+-
+-func (i *index) storeAllMeta(objs []runtime.Object) (err error) {
+- for _, obj := range objs {
+- if uids, ok := i.objects[obj.GetObjectKind().GroupVersionKind()]; ok {
+- if _, ok := uids[obj.GetUID()]; ok {
+- continue
+- }
+- }
+-
+- if err = i.storeMeta(obj); err != nil {
+- break
+- }
+- }
+-
+- return
+-}
+-
+-func (i *index) delete(gvk schema.GroupVersionKind, uid runtime.UID) {
+- if uids, ok := i.objects[gvk]; ok {
+- delete(uids, uid)
+- }
+-}
+-
+-func (i *index) count(gvk schema.GroupVersionKind) (count uint64) {
+- count = uint64(len(i.objects[gvk]))
+- log.Tracef("index: counted %d %s object(s)", count, gvk.Kind)
+- return
+-}
+-
+-func list(i *index, gvk schema.GroupVersionKind, apiTypes bool) ([]runtime.Object, error) {
+- uids := i.objects[gvk]
+- list := make([]runtime.Object, 0, len(uids))
+-
+- log.Tracef("index: listing %s objects, meta: %t", gvk, apiTypes)
+- for _, obj := range uids {
+- loadFunc := obj.loadFull
+- if apiTypes {
+- loadFunc = obj.loadAPI
+- }
+-
+- if result, err := loadFunc(); err != nil {
+- return nil, err
+- } else {
+- list = append(list, result)
+- }
+- }
+-
+- return list, nil
+-}
+-
+-func (i *index) list(gvk schema.GroupVersionKind) ([]runtime.Object, error) {
+- return list(i, gvk, false)
+-}
+-
+-func (i *index) listMeta(gvk schema.GroupVersionKind) ([]runtime.Object, error) {
+- return list(i, gvk, true)
+-}
+-*/
+diff --git a/pkg/storage/cache/object.go b/pkg/storage/cache/object.go
+deleted file mode 100644
+index c0e807c..0000000
+--- a/pkg/storage/cache/object.go
++++ /dev/null
+@@ -1,96 +0,0 @@
+-package cache
+-
+-/*
+-
+-TODO: Revisit if we need this file/package in the future.
+-
+-import (
+- log "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/storage"
+-)
+-
+-type cacheObject struct {
+- storage storage.Storage
+- object runtime.Object
+- checksum string
+- apiType bool
+-}
+-
+-func newCacheObject(s storage.Storage, object runtime.Object, apiType bool) (c *cacheObject, err error) {
+- c = &cacheObject{
+- storage: s,
+- object: object,
+- apiType: apiType,
+- }
+-
+- if c.checksum, err = s.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil {
+- c = nil
+- }
+-
+- return
+-}
+-
+-// loadFull returns the full Object, loading it only if it hasn't been cached before or the checksum has changed
+-func (c *cacheObject) loadFull() (runtime.Object, error) {
+- var checksum string
+- reload := c.apiType
+-
+- if !reload {
+- if chk, err := c.storage.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil {
+- return nil, err
+- } else if chk != c.checksum {
+- log.Tracef("cacheObject: %q invalidated, checksum mismatch: %q -> %q", c.object.GetName(), c.checksum, chk)
+- checksum = chk
+- reload = true
+- } else {
+- log.Tracef("cacheObject: %q checksum: %q", c.object.GetName(), c.checksum)
+- }
+- }
+-
+- if reload {
+- log.Tracef("cacheObject: full load triggered for %q", c.object.GetName())
+- obj, err := c.storage.Get(c.object.GroupVersionKind(), c.object.GetUID())
+- if err != nil {
+- return nil, err
+- }
+-
+- // Only apply the change after a successful Get
+- c.object = obj
+- c.apiType = false
+-
+- if len(checksum) > 0 {
+- c.checksum = checksum
+- }
+- }
+-
+- return c.object, nil
+-}
+-
+-// loadAPI returns the APIType of the Object, loading it only if the checksum has changed
+-func (c *cacheObject) loadAPI() (runtime.Object, error) {
+- if chk, err := c.storage.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil {
+- return nil, err
+- } else if chk != c.checksum {
+- log.Tracef("cacheObject: %q invalidated, checksum mismatch: %q -> %q", c.object.GetName(), c.checksum, chk)
+- log.Tracef("cacheObject: API load triggered for %q", c.object.GetName())
+- obj, err := c.storage.GetMeta(c.object.GroupVersionKind(), c.object.GetUID())
+- if err != nil {
+- return nil, err
+- }
+-
+- // Only apply the change after a successful GetMeta
+- c.object = obj
+- c.checksum = chk
+- c.apiType = true
+- } else {
+- log.Tracef("cacheObject: %q checksum: %q", c.object.GetName(), c.checksum)
+- }
+-
+- if c.apiType {
+- return c.object, nil
+- }
+-
+- return runtime.PartialObjectFrom(c.object), nil
+-}
+-*/
+diff --git a/pkg/storage/client/client.go b/pkg/storage/client/client.go
+new file mode 100644
+index 0000000..9c216a0
+--- /dev/null
++++ b/pkg/storage/client/client.go
+@@ -0,0 +1,315 @@
++package client
++
++import (
++ "context"
++ "errors"
++ "fmt"
++
++ "github.com/weaveworks/libgitops/pkg/filter"
++ "github.com/weaveworks/libgitops/pkg/serializer"
++ "github.com/weaveworks/libgitops/pkg/storage/backend"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ patchutil "github.com/weaveworks/libgitops/pkg/util/patch"
++ syncutil "github.com/weaveworks/libgitops/pkg/util/sync"
++ "k8s.io/apimachinery/pkg/api/meta"
++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
++ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
++ kruntime "k8s.io/apimachinery/pkg/runtime"
++ utilerrs "k8s.io/apimachinery/pkg/util/errors"
++ "k8s.io/apimachinery/pkg/util/sets"
++ "sigs.k8s.io/controller-runtime/pkg/client"
++)
++
++// TODO: Pass an ObjectID that contains all PartialObjectMetadata info for "downstream" consumers
++// that can make use of it by "casting up".
++
++var (
++ // ErrUnsupportedPatchType is returned when an unsupported patch type is used
++ ErrUnsupportedPatchType = errors.New("unsupported patch type")
++)
++
++type Reader interface {
++ client.Reader
++ BackendReader() backend.Reader
++}
++
++type Writer interface {
++ client.Writer
++ BackendWriter() backend.Writer
++}
++
++type StatusClient interface {
++ client.StatusClient
++ BackendStatusWriter() backend.StatusWriter
++}
++
++// Client is an interface for persisting and retrieving API objects to/from a backend.
++// One Client instance handles all different Kinds of Objects.
++type Client interface {
++ Reader
++ Writer
++ // TODO: StatusClient
++ //client.Client
++}
++
++// NewGeneric constructs a new Generic client
++// TODO: Construct the default patcher from the given scheme, make patcher an opt instead
++func NewGeneric(backend backend.Backend, patcher serializer.Patcher) (*Generic, error) {
++ if backend == nil {
++ return nil, fmt.Errorf("backend is mandatory")
++ }
++ return &Generic{backend, patcher}, nil
++}
++
++// Generic implements the Client interface
++type Generic struct {
++ backend backend.Backend
++ patcher serializer.Patcher
++}
++
++var _ Client = &Generic{}
++
++func (c *Generic) Backend() backend.Backend { return c.backend }
++func (c *Generic) BackendReader() backend.Reader { return c.backend }
++func (c *Generic) BackendWriter() backend.Writer { return c.backend }
++
++// Get fetches the Object for the resource with the given key, based on the underlying file content.
++// In order to only extract the metadata of this object, pass in a *metav1.PartialObjectMetadata.
++func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error {
++ obj.SetName(key.Name)
++ obj.SetNamespace(key.Namespace)
++
++ return c.backend.Get(ctx, obj)
++}
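++
++// An illustrative sketch (carGVK is a hypothetical GroupVersionKind): fetching
++// only the metadata of an object. For partial objects, TypeMeta must be set.
++//
++//	partial := &metav1.PartialObjectMetadata{}
++//	partial.SetGroupVersionKind(carGVK)
++//	err := c.Get(ctx, core.ObjectKey{Name: "my-car", Namespace: "default"}, partial)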
++
++// List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package
++// for more information, e.g. filter.NameFilter{} and filter.UIDFilter{})
++// You can also pass in an *unstructured.UnstructuredList to get an unknown type's data or
++// *metav1.PartialObjectMetadataList to just get the metadata of all objects of the specified gvk.
++// If you do specify either an *unstructured.UnstructuredList or *metav1.PartialObjectMetadataList,
++// you need to populate TypeMeta with the GVK you want back.
++// TODO: Check if this works with metav1.List{}
++// TODO: Create constructors for the different kinds of lists?
++func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...client.ListOption) error {
++ // This call will verify that list actually is a List type.
++ gvk, err := serializer.GVKForList(list, c.Backend().Scheme())
++ if err != nil {
++ return err
++ }
++ // This applies both upstream and custom options
++ listOpts := (&ListOptions{}).ApplyOptions(opts)
++
++ // Get namespacing info
++ gk := gvk.GroupKind()
++ namespaced, err := c.Backend().Storage().Namespacer().IsNamespaced(gk)
++ if err != nil {
++ return err
++ }
++
++ // By default, only search the given namespace. It is fully valid for this to be an
++ // empty string: it is the only valid value for root-spaced objects.
++ namespaces := sets.NewString(listOpts.Namespace)
++ // However, if the GroupKind is namespaced, and the given "filter namespace" in list
++ // options is empty, it means that one should list all namespaces
++ if namespaced && listOpts.Namespace == "" {
++ namespaces, err = c.Backend().ListNamespaces(ctx, gk)
++ if err != nil {
++ return err
++ }
++ } else if !namespaced && listOpts.Namespace != "" {
++ return errors.New("invalid namespace option: cannot filter namespace for root-spaced object")
++ }
++
++ allIDs := []core.UnversionedObjectID{}
++ for ns := range namespaces {
++ ids, err := c.Backend().ListObjectIDs(ctx, gk, ns)
++ if err != nil {
++ return err
++ }
++ allIDs = append(allIDs, ids...)
++ }
++
++ // Populate objs through the given (non-buffered) channel
++ ch := make(chan core.Object)
++ objs := make([]kruntime.Object, 0, len(allIDs))
++
++ // How should the object be created?
++ createFunc := createObject(gvk, c.Backend().Scheme())
++ if serializer.IsPartialObjectList(list) {
++ createFunc = createPartialObject(gvk)
++ } else if serializer.IsUnstructuredList(list) {
++ createFunc = createUnstructuredObject(gvk)
++ }
++ // Temporary processing goroutine; execution starts instantly
++ m := syncutil.RunMonitor(func() error {
++ return c.processKeys(ctx, allIDs, &listOpts.FilterOptions, createFunc, ch)
++ })
++
++ for o := range ch {
++ objs = append(objs, o)
++ }
++
++ if err := m.Wait(); err != nil {
++ return err
++ }
++
++ // Populate the List's Items field with the objects returned
++ return meta.SetList(list, objs)
++}
++
++func (c *Generic) Create(ctx context.Context, obj core.Object, _ ...client.CreateOption) error {
++ return c.backend.Create(ctx, obj)
++}
++
++func (c *Generic) Update(ctx context.Context, obj core.Object, _ ...client.UpdateOption) error {
++ return c.backend.Update(ctx, obj)
++}
++
++// Patch performs a strategic merge patch on the object with the given UID, using the byte-encoded patch given
++func (c *Generic) Patch(ctx context.Context, obj core.Object, patch core.Patch, _ ...client.PatchOption) error {
++ // Fail-fast: We must never save metadata-only structs
++ if serializer.IsPartialObject(obj) {
++ return backend.ErrCannotSaveMetadata
++ }
++
++ // Acquire the patch data from the "desired state" object given now, i.e. in MergeFrom{}
++ // TODO: Shall we require GVK to be present here using a meta interpreter?
++ patchJSON, err := patch.Data(obj)
++ if err != nil {
++ return err
++ }
++
++ // Load the current latest state into obj temporarily, before patching it
++ // This also validates the GVK, name and namespace.
++ if err := c.backend.Get(ctx, obj); err != nil {
++ return err
++ }
++
++ // Get the right BytePatcher for this patch type
++ // TODO: Make this return an error
++ bytePatcher := patchutil.BytePatcherForType(patch.Type())
++ if bytePatcher == nil {
++ return fmt.Errorf("patch type not supported: %s", patch.Type())
++ }
++
++ // Apply the patch into the object using the given byte patcher
++ if unstruct, ok := obj.(kruntime.Unstructured); ok {
++ // TODO: Provide an option for the schema
++ err = c.patcher.ApplyOnUnstructured(bytePatcher, patchJSON, unstruct, nil)
++ } else {
++ err = c.patcher.ApplyOnStruct(bytePatcher, patchJSON, obj)
++ }
++ if err != nil {
++ return err
++ }
++
++ // Perform an update internally, similar to what .Update would yield
++ // TODO: Maybe write to the Storage conditionally, using DryRunAll?
++ return c.Update(ctx, obj)
++}
++
++// Delete removes an Object from the backend.
++// Passing a *metav1.PartialObjectMetadata should work here.
++func (c *Generic) Delete(ctx context.Context, obj core.Object, _ ...client.DeleteOption) error {
++ return c.backend.Delete(ctx, obj)
++}
++
++// DeleteAllOf deletes all matched resources by first doing a List() operation on the given GVK of
++// obj (obj is not used for anything else) and the given filters in opts. Only the partial
++// metadata of each matched object is needed to perform the deletion.
++func (c *Generic) DeleteAllOf(ctx context.Context, obj core.Object, opts ...client.DeleteAllOfOption) error {
++ // This applies both upstream and custom options, and propagates the options correctly to both
++ // List() and Delete()
++ customDeleteAllOpts := (&DeleteAllOfOptions{}).ApplyOptions(opts)
++
++ // Get the GVK of the object
++ gvk, err := serializer.GVKForObject(c.Backend().Scheme(), obj)
++ if err != nil {
++ return err
++ }
++
++ // List all matched objects for the given ListOptions, and GVK.
++ // UnstructuredList is used here so that we can use filters that operate on fields
++ list := &unstructured.UnstructuredList{}
++ list.SetGroupVersionKind(gvk)
++ if err := c.List(ctx, list, customDeleteAllOpts); err != nil {
++ return err
++ }
++
++ // Loop through all of the matched items, and Delete them one-by-one
++ for i := range list.Items {
++ if err := c.Delete(ctx, &list.Items[i], customDeleteAllOpts); err != nil {
++ return err
++ }
++ }
++ return nil
++}
++
++// Scheme returns the scheme this client is using.
++func (c *Generic) Scheme() *kruntime.Scheme {
++ return c.backend.Scheme()
++}
++
++// RESTMapper returns the RESTMapper this client is using. For now, this returns nil, so don't use it.
++func (c *Generic) RESTMapper() meta.RESTMapper {
++ return nil
++}
++
++type newObjectFunc func() (core.Object, error)
++
++func createObject(gvk core.GroupVersionKind, scheme *kruntime.Scheme) newObjectFunc {
++ return func() (core.Object, error) {
++ return NewObjectForGVK(gvk, scheme)
++ }
++}
++
++func createPartialObject(gvk core.GroupVersionKind) newObjectFunc {
++ return func() (core.Object, error) {
++ obj := &metav1.PartialObjectMetadata{}
++ obj.SetGroupVersionKind(gvk)
++ return obj, nil
++ }
++}
++
++func createUnstructuredObject(gvk core.GroupVersionKind) newObjectFunc {
++ return func() (core.Object, error) {
++ obj := &unstructured.Unstructured{}
++ obj.SetGroupVersionKind(gvk)
++ return obj, nil
++ }
++}
++
++func (c *Generic) processKeys(ctx context.Context, ids []core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) error {
++ goroutines := []func() error{}
++ for _, id := range ids {
++ goroutines = append(goroutines, c.processKey(ctx, id, filterOpts, fn, output))
++ }
++
++ defer close(output)
++
++ return utilerrs.AggregateGoroutines(goroutines...)
++}
++
++func (c *Generic) processKey(ctx context.Context, id core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) func() error {
++ return func() error {
++ // Create a new object, and decode into it using Get
++ obj, err := fn()
++ if err != nil {
++ return err
++ }
++
++ if err := c.Get(ctx, id.ObjectKey(), obj); err != nil {
++ return err
++ }
++
++ // Match the object against the filters
++ matched, err := filterOpts.Match(obj)
++ if err != nil {
++ return err
++ }
++ if matched {
++ output <- obj
++ }
++
++ return nil
++ }
++}
+diff --git a/pkg/storage/client/options.go b/pkg/storage/client/options.go
+new file mode 100644
+index 0000000..7fa8f8e
+--- /dev/null
++++ b/pkg/storage/client/options.go
+@@ -0,0 +1,75 @@
++package client
++
++import (
++ "github.com/weaveworks/libgitops/pkg/filter"
++ "sigs.k8s.io/controller-runtime/pkg/client"
++)
++
++type ListOption interface {
++ client.ListOption
++ filter.FilterOption
++}
++
++type ListOptions struct {
++ client.ListOptions
++ filter.FilterOptions
++}
++
++var _ ListOption = &ListOptions{}
++
++func (o *ListOptions) ApplyToList(target *client.ListOptions) {
++ o.ListOptions.ApplyToList(target)
++}
++
++func (o *ListOptions) ApplyToFilterOptions(target *filter.FilterOptions) {
++ o.FilterOptions.ApplyToFilterOptions(target)
++}
++
++func (o *ListOptions) ApplyOptions(opts []client.ListOption) *ListOptions {
++ // Apply the "normal" ListOptions
++ o.ListOptions.ApplyOptions(opts)
++ // Apply all FilterOptions, if they implement that interface
++ for _, opt := range opts {
++ o.FilterOptions.ApplyOption(opt)
++ }
++
++ // If listOpts.Namespace was given, add it to the list of ObjectFilters
++ if len(o.Namespace) != 0 {
++ o.ObjectFilters = append(o.ObjectFilters, filter.NamespaceFilter{Namespace: o.Namespace})
++ }
++ // If listOpts.LabelSelector was given, add it to the list of ObjectFilters
++ if o.LabelSelector != nil {
++ o.ObjectFilters = append(o.ObjectFilters, filter.LabelsFilter{LabelSelector: o.LabelSelector})
++ }
++
++ return o
++}
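++
++// An illustrative sketch (assuming controller-runtime's client.InNamespace
++// option): applying a namespace option also registers a namespace filter.
++//
++//	o := (&ListOptions{}).ApplyOptions([]client.ListOption{client.InNamespace("default")})
++//	// o.Namespace == "default", and o.ObjectFilters now contains a
++//	// filter.NamespaceFilter{Namespace: "default"} entry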
++
++type DeleteAllOfOption interface {
++ ListOption
++ client.DeleteAllOfOption
++}
++
++type DeleteAllOfOptions struct {
++ ListOptions
++ client.DeleteOptions
++}
++
++var _ DeleteAllOfOption = &DeleteAllOfOptions{}
++
++func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(target *client.DeleteAllOfOptions) {
++ o.DeleteOptions.ApplyToDelete(&target.DeleteOptions)
++}
++
++func (o *DeleteAllOfOptions) ApplyOptions(opts []client.DeleteAllOfOption) *DeleteAllOfOptions {
++ // Cannot apply the options directly to o; hence, create a temporary object to which upstream opts are applied
++ do := (&client.DeleteAllOfOptions{}).ApplyOptions(opts)
++ o.ListOptions.ListOptions = do.ListOptions
++ o.DeleteOptions = do.DeleteOptions
++
++ // Apply all FilterOptions, if they implement that interface
++ for _, opt := range opts {
++ o.FilterOptions.ApplyOption(opt)
++ }
++ return o
++}
+diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go
+new file mode 100644
+index 0000000..1108c1d
+--- /dev/null
++++ b/pkg/storage/client/transactional/client.go
+@@ -0,0 +1,330 @@
++package transactional
++
++import (
++ "context"
++ "crypto/rand"
++ "encoding/hex"
++ "fmt"
++ "strings"
++ "sync"
++ "sync/atomic"
++
++ "github.com/sirupsen/logrus"
++ "github.com/weaveworks/libgitops/pkg/storage/backend"
++ "github.com/weaveworks/libgitops/pkg/storage/client"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ utilerrs "k8s.io/apimachinery/pkg/util/errors"
++)
++
++var _ Client = &Generic{}
++
++func NewGeneric(c client.Client, manager BranchManager, merger BranchMerger) (Client, error) {
++ if c == nil {
++ return nil, fmt.Errorf("%w: c is required", core.ErrInvalidParameter)
++ }
++ if manager == nil {
++ return nil, fmt.Errorf("%w: manager is required", core.ErrInvalidParameter)
++ }
++ return &Generic{
++ c: c,
++ txs: make(map[string]*txLock),
++ txsMu: &sync.Mutex{},
++ manager: manager,
++ merger: merger,
++ }, nil
++}
++
++type Generic struct {
++ c client.Client
++
++ txs map[string]*txLock
++ txsMu *sync.Mutex
++
++ // +optional
++ merger BranchMerger
++ // +required
++ manager BranchManager
++}
++
++type txLock struct {
++ mu *sync.RWMutex
++ mode TxMode
++ // active == 1 means "transaction active, mu is locked for writing"
++ // active == 0 means "transaction has stopped, mu has been unlocked"
++ active uint32
++}
++
++func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error {
++ return c.lockForReading(ctx, func() error {
++ return c.c.Get(ctx, key, obj)
++ })
++}
++
++func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...core.ListOption) error {
++ return c.lockForReading(ctx, func() error {
++ return c.c.List(ctx, list, opts...)
++ })
++}
++
++func (c *Generic) lockForReading(ctx context.Context, operation func() error) error {
++ ref := core.GetVersionRef(ctx)
++ if !ref.IsWritable() {
++ // Never block reads for read-only VersionRefs. We know nobody can change
++ // them during the read operation, so they should be race condition-free.
++ return operation()
++ }
++ // If the VersionRef is writable; treat it as a branch and lock it to avoid
++ // race conditions.
++ return c.lockAndReadBranch(ref.String(), operation)
++}
++
++func (c *Generic) lockAndReadBranch(branch string, callback func() error) error {
++ // Use c.txsMu to guard reads and writes to the c.txs map
++ c.txsMu.Lock()
++ // Check if information about a transaction on this branch exists.
++ txState, ok := c.txs[branch]
++ if !ok {
++ // grow the txs map by one
++ c.txs[branch] = &txLock{
++ mu: &sync.RWMutex{},
++ }
++ txState = c.txs[branch]
++ }
++ c.txsMu.Unlock()
++
++ // In the atomic mode, we lock the txLock during the read,
++ // so no new transactions can be started while the read
++ // operation goes on. In non-atomic modes, reads aren't locked,
++ // instead it is assumed that downstream implementations just
++ // read the latest commit on the given branch.
++ if txState.mode == TxModeAtomic {
++ txState.mu.RLock()
++ }
++ err := callback()
++ if txState.mode == TxModeAtomic {
++ txState.mu.RUnlock()
++ }
++ return err
++}
++
++func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txFunc) {
++ // Acquire the tx-specific lock
++ c.txsMu.Lock()
++ txState, ok := c.txs[info.Head]
++ if !ok {
++ // grow the txs map by one
++ c.txs[info.Head] = &txLock{
++ mu: &sync.RWMutex{},
++ }
++ txState = c.txs[info.Head]
++ }
++ txState.mode = info.Options.Mode
++ c.txsMu.Unlock()
++
++ // Wait for all reads to complete (in the case of the atomic mode),
++ // and then lock for writing. For non-atomic mode this uses the mutex
++ // as it is modifying txState, and two transactions must not run at
++ // the same time for the same branch.
++ //
++ // Always lock mu when a transaction is running on this branch,
++ // regardless of mode. If atomic mode is enabled, this also waits
++ // on any reads happening at this moment. For all modes, this ensures
++ // transactions happen in order.
++ txState.mu.Lock()
++ txState.active = 1 // set tx state to "active"
++
++ // Create a child context with a timeout
++ dlCtx, cleanupTimeout := context.WithTimeout(ctx, info.Options.Timeout)
++
++ // This function cleans up the transaction and unlocks the tx mutex
++ cleanupFunc := func() error {
++ // Cleanup after the transaction
++ if err := c.cleanupAfterTx(ctx, &info); err != nil {
++ return fmt.Errorf("Failed to cleanup branch %s after tx: %v", info.Head, err)
++ }
++ // Unlock the mutex so new transactions can take place on this branch
++ txState.mu.Unlock()
++ return nil
++ }
++
++ // Start waiting for the cancellation of the deadline context.
++ go func() {
++ // Wait for the context to either timeout or be cancelled
++ <-dlCtx.Done()
++ // This guard makes sure the cleanup function runs exactly
++ // once, regardless of transaction end cause.
++ if atomic.CompareAndSwapUint32(&txState.active, 1, 0) {
++ if err := cleanupFunc(); err != nil {
++ logrus.Errorf("Failed to cleanup after tx timeout: %v", err)
++ }
++ }
++ }()
++
++ abortFunc := func() error {
++ // The transaction ended; the caller is either Abort() or
++ // at the end of a successful transaction. The cause of
++ // Abort() happening can also be a context cancellation.
++ // If the parent context was cancelled or timed out; this
++ // function and the goroutine above race to set active => 0.
++ // Regardless, due to the atomic nature of the operation,
++ // cleanupFunc() will only be run once.
++ if atomic.CompareAndSwapUint32(&txState.active, 1, 0) {
++ // We can now stop the timeout timer
++ cleanupTimeout()
++ // Clean up the transaction
++ return cleanupFunc()
++ }
++ return nil
++ }
++
++ return dlCtx, abortFunc
++}
++
++func (c *Generic) cleanupAfterTx(ctx context.Context, info *TxInfo) error {
++ // Always both clean the branch, and run post-tx tasks
++ return utilerrs.NewAggregate([]error{
++ c.manager.ResetToCleanBranch(ctx, info.Base),
++ // TODO: should this be in its own goroutine to switch back to main
++ // ASAP?
++ c.manager.TransactionHookChain().PostTransactionHook(ctx, *info),
++ })
++}
++
++func (c *Generic) BackendReader() backend.Reader {
++ return c.c.BackendReader()
++}
++
++func (c *Generic) BranchMerger() BranchMerger {
++ return c.merger
++}
++
++func (c *Generic) BranchManager() BranchManager {
++ return c.manager
++}
++
++func (c *Generic) Transaction(ctx context.Context, opts ...TxOption) Tx {
++ tx, err := c.transaction(ctx, opts...)
++ if err != nil {
++ panic(err)
++ }
++ return tx
++}
++
++func (c *Generic) BranchTransaction(ctx context.Context, headBranch string, opts ...TxOption) BranchTx {
++ tx, err := c.branchTransaction(ctx, headBranch, opts...)
++ if err != nil {
++ panic(err)
++ }
++ return tx
++}
++
++func (c *Generic) validateCtx(ctx context.Context) (core.VersionRef, error) {
++ // Check that the VersionRef is writable
++ ref := core.GetVersionRef(ctx)
++ if !ref.IsWritable() {
++ return nil, fmt.Errorf("must give a writable VersionRef to (Branch)Transaction()")
++ }
++ // Just return it
++ return ref, nil
++}
++
++func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) {
++ // Validate the versionref from the context
++ ref, err := c.validateCtx(ctx)
++ if err != nil {
++ return nil, err
++ }
++
++ // Parse options
++ o := defaultTxOptions().ApplyOptions(opts)
++
++ branch := ref.String()
++ info := TxInfo{
++ Base: branch,
++ Head: branch,
++ Options: *o,
++ }
++ // Initialize the transaction
++ ctxWithDeadline, cleanupFunc := c.initTx(ctx, info)
++
++ // Run pre-tx checks
++ err = c.manager.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info)
++
++ return &txImpl{
++ &txCommon{
++ err: err,
++ c: c.c,
++ manager: c.manager,
++ ctx: ctxWithDeadline,
++ info: info,
++ cleanupFunc: cleanupFunc,
++ },
++ }, nil
++}
++
++func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts ...TxOption) (BranchTx, error) {
++ // Validate the versionref from the context
++ ref, err := c.validateCtx(ctx)
++ if err != nil {
++ return nil, err
++ }
++ baseBranch := ref.String()
++
++ // Append random bytes to the end of the head branch if it ends with a dash
++ if strings.HasSuffix(headBranch, "-") {
++ suffix, err := randomSHA(4)
++ if err != nil {
++ return nil, err
++ }
++ headBranch += suffix
++ }
++
++ // Validate that the base and head branches are distinct
++ if baseBranch == headBranch {
++ return nil, fmt.Errorf("head and target branches must not be the same")
++ }
++
++ logrus.Debugf("Base branch: %q. Head branch: %q.", baseBranch, headBranch)
++
++ // Parse options
++ o := defaultTxOptions().ApplyOptions(opts)
++
++ info := TxInfo{
++ Base: baseBranch,
++ Head: headBranch,
++ Options: *o,
++ }
++
++ // Register the head branch with the context
++ ctxWithHeadBranch := core.WithVersionRef(ctx, core.NewBranchRef(headBranch))
++ // Initialize the transaction
++ ctxWithDeadline, cleanupFunc := c.initTx(ctxWithHeadBranch, info)
++
++ // Run pre-tx checks and create the new branch
++ err = utilerrs.NewAggregate([]error{
++ c.manager.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info),
++ c.manager.CreateBranch(ctxWithDeadline, headBranch),
++ })
++
++ return &txBranchImpl{
++ txCommon: &txCommon{
++ err: err,
++ c: c.c,
++ manager: c.manager,
++ ctx: ctxWithDeadline,
++ info: info,
++ cleanupFunc: cleanupFunc,
++ },
++ merger: c.merger,
++ }, nil
++}
++
++// randomSHA returns a hex-encoded string from {byteLen} random bytes.
++func randomSHA(byteLen int) (string, error) {
++ b := make([]byte, byteLen)
++ _, err := rand.Read(b)
++ if err != nil {
++ return "", err
++ }
++ return hex.EncodeToString(b), nil
++}
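++
++// An illustrative sketch: starting a branch transaction from the branch set in
++// ctx. The trailing dash requests a random hex suffix, e.g. "update-1a2b3c4d".
++//
++//	tx := c.BranchTransaction(ctx, "update-")
++//	// ... stage changes through tx, then finalize or abort it ...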
+diff --git a/pkg/storage/client/transactional/commit.go b/pkg/storage/client/transactional/commit.go
+new file mode 100644
+index 0000000..eeb5e9f
+--- /dev/null
++++ b/pkg/storage/client/transactional/commit.go
+@@ -0,0 +1,126 @@
++package transactional
++
++import (
++ "fmt"
++
++ "github.com/fluxcd/go-git-providers/validation"
++)
++
++// Commit describes a result of a transaction.
++type Commit interface {
++ // GetAuthor describes the author of this commit.
++ // +required
++ GetAuthor() CommitAuthor
++ // GetMessage describes the change in this commit.
++ // +required
++ GetMessage() CommitMessage
++ // Validate validates that all required fields are set, and given data is valid.
++ Validate() error
++}
++
++type CommitAuthor interface {
++ // GetName describes the author's name (e.g. as per git config)
++ // +required
++ GetName() string
++ // GetEmail describes the author's email (e.g. as per git config).
++ // It is optional generally, but might be required by some specific
++ // implementations.
++ // +optional
++ GetEmail() string
++ // The String() method must return a (ideally both human- and machine-
++ // readable) concatenated string including the name and email (if
++ // applicable) of the author.
++ fmt.Stringer
++}
++
++type CommitMessage interface {
++ // GetTitle describes the change concisely, so it can be used e.g. as
++ // a commit message or PR title. Certain implementations might enforce
++ // character limits on this string.
++ // +required
++ GetTitle() string
++ // GetDescription contains optional extra, more detailed information
++ // about the change.
++ // +optional
++ GetDescription() string
++ // The String() method must return a (ideally both human- and machine-
++ // readable) concatenated string including the title and description
++ // (if applicable) of the message.
++ fmt.Stringer
++}
++
++// GenericCommit implements Commit.
++var _ Commit = GenericCommit{}
++
++// GenericCommit implements Commit.
++type GenericCommit struct {
++ // Author describes the author of this commit.
++ // +required
++ Author CommitAuthor
++ // Message describes the change in this commit.
++ // +required
++ Message CommitMessage
++}
++
++func (r GenericCommit) GetAuthor() CommitAuthor { return r.Author }
++func (r GenericCommit) GetMessage() CommitMessage { return r.Message }
++
++func (r GenericCommit) Validate() error {
++ v := validation.New("GenericCommit")
++ if len(r.Author.GetName()) == 0 {
++ v.Required("Author.GetName")
++ }
++ if len(r.Message.GetTitle()) == 0 {
++ v.Required("Message.GetTitle")
++ }
++ return v.Error()
++}
++
++// GenericCommitAuthor implements CommitAuthor.
++var _ CommitAuthor = GenericCommitAuthor{}
++
++// GenericCommitAuthor implements CommitAuthor.
++type GenericCommitAuthor struct {
++ // Name describes the author's name (as per git config)
++ // +required
++ Name string
++ // Email describes the author's email (as per git config)
++ // +optional
++ Email string
++}
++
++func (r GenericCommitAuthor) GetName() string { return r.Name }
++func (r GenericCommitAuthor) GetEmail() string { return r.Email }
++
++func (r GenericCommitAuthor) String() string {
++ if len(r.Email) != 0 {
++ return fmt.Sprintf("%s <%s>", r.Name, r.Email)
++ }
++ return r.Name
++}
++
++// GenericCommitMessage implements CommitMessage.
++var _ CommitMessage = GenericCommitMessage{}
++
++// GenericCommitMessage implements CommitMessage.
++type GenericCommitMessage struct {
++ // Title describes the change concisely, so it can be used e.g. as
++ // a commit message or PR title. Certain implementations might enforce
++ // character limits on this string.
++ // +required
++ Title string
++ // Description contains optional extra, more detailed information
++ // about the change.
++ // +optional
++ Description string
++}
++
++func (r GenericCommitMessage) GetTitle() string { return r.Title }
++func (r GenericCommitMessage) GetDescription() string { return r.Description }
++
++func (r GenericCommitMessage) String() string {
++ if len(r.Description) != 0 {
++ return fmt.Sprintf("%s\n\n%s", r.Title, r.Description)
++ }
++ return r.Title
++}
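++
++// An illustrative sketch: building a commit description and validating it.
++//
++//	commit := GenericCommit{
++//		Author:  GenericCommitAuthor{Name: "Jane Doe", Email: "jane@example.com"},
++//		Message: GenericCommitMessage{Title: "Update Car status"},
++//	}
++//	if err := commit.Validate(); err != nil {
++//		// the commit is missing required fields
++//	}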
+diff --git a/pkg/storage/client/transactional/distributed/client.go b/pkg/storage/client/transactional/distributed/client.go
+new file mode 100644
+index 0000000..665c6fd
+--- /dev/null
++++ b/pkg/storage/client/transactional/distributed/client.go
+@@ -0,0 +1,313 @@
++package distributed
++
++import (
++ "context"
++ "fmt"
++ "sync"
++ "time"
++
++ "github.com/sirupsen/logrus"
++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "k8s.io/apimachinery/pkg/util/wait"
++)
++
++// NewClient creates a new distributed Client using the given underlying transactional Client,
++// remote, and options that configure how the Client should respond to network partitions.
++func NewClient(c transactional.Client, remote Remote, opts ...ClientOption) (*Generic, error) {
++ if c == nil {
++ return nil, fmt.Errorf("%w: c is mandatory", core.ErrInvalidParameter)
++ }
++ if remote == nil {
++ return nil, fmt.Errorf("%w: remote is mandatory", core.ErrInvalidParameter)
++ }
++
++ o := defaultOptions().ApplyOptions(opts)
++
++ g := &Generic{
++ Client: c,
++ remote: remote,
++ opts: *o,
++ branchLocks: make(map[string]*branchLock),
++ branchLocksMu: &sync.Mutex{},
++ }
++
++ // Register ourselves to hook into the branch manager's operations
++ c.BranchManager().CommitHookChain().Register(g)
++ c.BranchManager().TransactionHookChain().Register(g)
++
++ return g, nil
++}
++
++type Generic struct {
++ transactional.Client
++ remote Remote
++ opts ClientOptions
++ // branchLocks maps a given branch to a lock guarding the state of that branch
++ branchLocks map[string]*branchLock
++ // branchLocksMu guards branchLocks
++ branchLocksMu *sync.Mutex
++}
++
++type branchLock struct {
++ // mu should be write-locked whenever the branch is actively running any
++ // function from the remote
++ mu *sync.RWMutex
++ // lastPull is guarded by mu; before reading, one should RLock mu
++ lastPull time.Time
++}
++
++func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error {
++ return c.readWhenPossible(ctx, func() error {
++ return c.Client.Get(ctx, key, obj)
++ })
++}
++
++func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...core.ListOption) error {
++ return c.readWhenPossible(ctx, func() error {
++ return c.Client.List(ctx, list, opts...)
++ })
++}
++
++func (c *Generic) readWhenPossible(ctx context.Context, operation func() error) error {
++ ref := core.GetVersionRef(ctx)
++ // If the ref is not writable, we don't have to worry about race conditions
++ if !ref.IsWritable() {
++ return operation()
++ }
++ branch := ref.String()
++
++ // Check if we need to do a pull before
++ if c.needsResync(branch, c.opts.CacheValidDuration) {
++ // Try to pull the remote branch. If it fails, use returnErr to figure out if
++ // this (depending on the configured PACELC mode) is a critical error, or if we
++ // should continue with the read
++ if err := c.pull(ctx, branch); err != nil {
++ if criticalErr := c.returnErr(err); criticalErr != nil {
++ return criticalErr
++ }
++ }
++ }
++ // Do the read operation
++ return operation()
++}
++
++func (c *Generic) getBranchLockInfo(branch string) *branchLock {
++ c.branchLocksMu.Lock()
++ defer c.branchLocksMu.Unlock()
++
++ // Check if there exists a lock for that branch
++ info, ok := c.branchLocks[branch]
++ if ok {
++ return info
++ }
++ // Write the new lock to the branchLocks map
++ info = &branchLock{
++ mu: &sync.RWMutex{},
++ }
++ c.branchLocks[branch] = info
++ return info
++}
++
++func (c *Generic) needsResync(branch string, d time.Duration) bool {
++ lck := c.getBranchLockInfo(branch)
++ // Lock while reading the last resync time
++ lck.mu.RLock()
++ defer lck.mu.RUnlock()
++ // Resync if there has been no sync so far, or if the last resync was too long ago
++ return lck.lastPull.IsZero() || time.Since(lck.lastPull) > d
++}
++
++// StartResyncLoop starts a resync loop for the given branches for
++// the given interval.
++//
++// resyncCacheInterval specifies the interval at which resyncs
++// (remote Pulls) should be run in the background. The duration
++// must be positive (i.e. non-zero).
++//
++// resyncBranches specifies what branches to resync. The default is
++// []string{""}, i.e. only the "default" branch.
++//
++// ctx should be used to cancel the loop, if needed.
++//
++// While it is technically possible to start many of these resync
++// loops, it is not recommended. Start it once, for all the branches
++// you need. The branches will be pulled synchronously in order. The
++// resync interval is non-sliding, which means that the interval
++// includes the time of the operations.
++func (c *Generic) StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches ...string) {
++ // Only start this loop if resyncCacheInterval > 0
++ if resyncCacheInterval <= 0 {
++ logrus.Warn("No need to start the resync loop; resyncCacheInterval <= 0")
++ return
++ }
++ // If unset, only sync the default branch.
++ if resyncBranches == nil {
++ resyncBranches = []string{""}
++ }
++
++ // Start the resync goroutine
++ go c.resyncLoop(ctx, resyncCacheInterval, resyncBranches)
++}
++
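++// A hypothetical usage sketch (assumes a *Generic "c" built via NewClient):
++//
++//   ctx, cancel := context.WithCancel(context.Background())
++//   defer cancel()
++//   // Resync the default branch and "staging" every 30 seconds in the background.
++//   c.StartResyncLoop(ctx, 30*time.Second, "", "staging")
++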
++func (c *Generic) resyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches []string) {
++ logrus.Debug("Starting the resync loop...")
++
++ wait.NonSlidingUntilWithContext(ctx, func(_ context.Context) {
++
++ for _, branch := range resyncBranches {
++ logrus.Tracef("resyncLoop: Will perform pull operation on branch: %q", branch)
++ // Perform a fetch, pull & checkout of the new revision
++ if err := c.pull(ctx, branch); err != nil {
++ logrus.Errorf("resyncLoop: pull failed with error: %v", err)
++ return
++ }
++ }
++ }, resyncCacheInterval)
++ logrus.Info("Exiting the resync loop...")
++}
++
++func (c *Generic) pull(ctx context.Context, branch string) error {
++ // Need to get the branch-specific lock variable
++ lck := c.getBranchLockInfo(branch)
++ // Write-lock while this operation is in progress
++ lck.mu.Lock()
++ defer lck.mu.Unlock()
++
++ // Create a new context that times out after the given duration
++ pullCtx, cancel := context.WithTimeout(ctx, c.opts.PullTimeout)
++ defer cancel()
++
++ // Make a ctx for the given branch
++ ctxForBranch := core.WithVersionRef(pullCtx, core.NewBranchRef(branch))
++ if err := c.remote.Pull(ctxForBranch); err != nil {
++ return err
++ }
++
++ // Register the timestamp into the lock
++ lck.lastPull = time.Now()
++
++ // All good
++ return nil
++}
++
++func (c *Generic) PreTransactionHook(ctx context.Context, info transactional.TxInfo) error {
++ // We count on ctx having the VersionRef registered for the head branch
++
++ // Lock the branch for writing, if supported by the remote
++ // If the lock fails, we DO NOT try to pull, but just exit (either with err or a nil error,
++ // depending on the configured PACELC mode)
++ // TODO: Can we rely on the timeout being exact enough here?
++ // TODO: How to do this before the branch even exists...?
++ if err := c.lock(ctx, info.Options.Timeout); err != nil {
++ return c.returnErr(err)
++ }
++
++ // Always Pull the _base_ branch before a transaction, to be up-to-date
++ // before creating the new head branch
++ if err := c.pull(ctx, info.Base); err != nil {
++ return c.returnErr(err)
++ }
++
++ // All good
++ return nil
++}
++
++func (c *Generic) PreCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error {
++ return nil // nothing to do here
++}
++
++func (c *Generic) PostCommitHook(ctx context.Context, _ transactional.Commit, _ transactional.TxInfo) error {
++ // Push the branch in the ctx
++ if err := c.push(ctx); err != nil {
++ return c.returnErr(err)
++ }
++ return nil
++}
++
++func (c *Generic) PostTransactionHook(ctx context.Context, info transactional.TxInfo) error {
++ // Unlock the head branch, if supported
++ if err := c.unlock(ctx); err != nil {
++ return c.returnErr(err)
++ }
++
++ return nil
++}
++
++func (c *Generic) Remote() Remote {
++ return c.remote
++}
++
++// Note: this must ONLY be called from functions where it is guaranteed
++// that the ctx carries a branch VersionRef.
++func (c *Generic) branchFromCtx(ctx context.Context) string {
++ return core.GetVersionRef(ctx).String()
++}
++
++func (c *Generic) returnErr(err error) error {
++ // If RemoteErrorStream isn't defined, just pass the error through
++ if c.opts.RemoteErrorStream == nil {
++ return err
++ }
++ // Send to the channel asynchronously (so the caller never blocks), and return no error
++ go func() {
++ c.opts.RemoteErrorStream <- err
++ }()
++ return nil
++}
++
++func (c *Generic) lock(ctx context.Context, d time.Duration) error {
++ lr, ok := c.remote.(LockableRemote)
++ if !ok {
++ return nil
++ }
++
++ // Need to get the branch-specific lock variable
++ lck := c.getBranchLockInfo(c.branchFromCtx(ctx))
++ // Write-lock while this operation is in progress
++ lck.mu.Lock()
++ defer lck.mu.Unlock()
++
++ // Enforce a timeout
++ lockCtx, cancel := context.WithTimeout(ctx, c.opts.LockTimeout)
++ defer cancel()
++
++ return lr.Lock(lockCtx, d)
++}
++
++func (c *Generic) unlock(ctx context.Context) error {
++ lr, ok := c.remote.(LockableRemote)
++ if !ok {
++ return nil
++ }
++
++ // Need to get the branch-specific lock variable
++ lck := c.getBranchLockInfo(c.branchFromCtx(ctx))
++ // Write-lock while this operation is in progress
++ lck.mu.Lock()
++ defer lck.mu.Unlock()
++
++ // Enforce a timeout
++ unlockCtx, cancel := context.WithTimeout(ctx, c.opts.LockTimeout)
++ defer cancel()
++
++ return lr.Unlock(unlockCtx)
++}
++
++func (c *Generic) push(ctx context.Context) error {
++ // Need to get the branch-specific lock variable
++ lck := c.getBranchLockInfo(c.branchFromCtx(ctx))
++ // Write-lock while this operation is in progress
++ lck.mu.Lock()
++ defer lck.mu.Unlock()
++
++ // Create a new context that times out after the given duration
++ pushCtx, cancel := context.WithTimeout(ctx, c.opts.PushTimeout)
++ defer cancel()
++
++ // Push the head branch using the remote
++ // If the Push fails, don't execute any other later statements
++ if err := c.remote.Push(pushCtx); err != nil {
++ return err
++ }
++ return nil
++}
+diff --git a/pkg/storage/client/transactional/distributed/git/git.go b/pkg/storage/client/transactional/distributed/git/git.go
+new file mode 100644
+index 0000000..53cf157
+--- /dev/null
++++ b/pkg/storage/client/transactional/distributed/git/git.go
+@@ -0,0 +1,368 @@
++package git
++
++import (
++ "context"
++ "errors"
++ "fmt"
++ "io/ioutil"
++ "os"
++ "sync"
++ "time"
++
++ "github.com/fluxcd/go-git-providers/gitprovider"
++ git "github.com/go-git/go-git/v5"
++ "github.com/go-git/go-git/v5/plumbing"
++ "github.com/go-git/go-git/v5/plumbing/object"
++ log "github.com/sirupsen/logrus"
++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed"
++)
++
++var (
++ // ErrNotStarted happens if you try to operate on the LocalClone before you have started
++ // it with StartCheckoutLoop.
++ ErrNotStarted = errors.New("the LocalClone hasn't been started (and hence, cloned) yet")
++ // ErrCannotWriteToReadOnly happens if you try to do a write operation for a non-authenticated Git repo.
++ ErrCannotWriteToReadOnly = errors.New("the LocalClone is read-only, cannot write")
++)
++
++const (
++ defaultBranch = "master"
++)
++
++// LocalCloneOptions provides options for the LocalClone.
++// TODO: Refactor this into the controller-runtime Options factory pattern.
++type LocalCloneOptions struct {
++ Branch string // default "master"
++
++ // Authentication method. If unspecified, this clone is read-only.
++ AuthMethod AuthMethod
++}
++
++func (o *LocalCloneOptions) Default() {
++ if o.Branch == "" {
++ o.Branch = defaultBranch
++ }
++}
++
++// LocalClone is an implementation of both a Remote, and a BranchManager, for Git.
++var _ transactional.BranchManager = &LocalClone{}
++var _ distributed.Remote = &LocalClone{}
++
++// NewLocalClone creates a new Remote and BranchManager implementation using Git. The repo is
++// cloned immediately in the constructor; ctx can be used to enforce a timeout for the clone.
++func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts LocalCloneOptions) (*LocalClone, error) {
++ log.Info("Initializing the Git repo...")
++
++ // Default the options
++ opts.Default()
++
++ // Create a temporary directory for the clone
++ tmpDir, err := ioutil.TempDir("", "libgitops")
++ if err != nil {
++ return nil, err
++ }
++ log.Debugf("Created temporary directory for the git clone at %q", tmpDir)
++
++ d := &LocalClone{
++ repoRef: repoRef,
++ opts: opts,
++ cloneDir: tmpDir,
++ lock: &sync.Mutex{},
++ commitHooks: &transactional.MultiCommitHook{},
++ txHooks: &transactional.MultiTransactionHook{},
++ }
++
++ log.Trace("URL endpoint parsed and authentication method chosen")
++
++ if d.canWrite() {
++ log.Infof("Running in read-write mode, will commit back current status to the repo")
++ } else {
++ log.Infof("Running in read-only mode, won't write status back to the repo")
++ }
++
++ // Clone the repo
++ if err := d.clone(ctx); err != nil {
++ return nil, err
++ }
++
++ return d, nil
++}
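++
++// An illustrative construction sketch; repoRef and auth are assumed to be
++// created via go-git-providers elsewhere:
++//
++//   ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
++//   defer cancel()
++//   lc, err := NewLocalClone(ctx, repoRef, LocalCloneOptions{Branch: "main", AuthMethod: auth})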
++
++// LocalClone is an implementation of both a Remote, and a BranchManager, for Git.
++type LocalClone struct {
++ // user-specified options
++ repoRef gitprovider.RepositoryRef
++ opts LocalCloneOptions
++
++ // the temporary directory used for the clone
++ cloneDir string
++
++ // go-git objects. wt is the worktree of the repo, persistent during the lifetime of repo.
++ repo *git.Repository
++ wt *git.Worktree
++
++ // the lock for git operations (so no ops are done simultaneously)
++ lock *sync.Mutex
++
++ commitHooks transactional.CommitHookChain
++ txHooks transactional.TransactionHookChain
++}
++
++func (d *LocalClone) CommitHookChain() transactional.CommitHookChain {
++ return d.commitHooks
++}
++
++func (d *LocalClone) TransactionHookChain() transactional.TransactionHookChain {
++ return d.txHooks
++}
++
++func (d *LocalClone) Dir() string {
++ return d.cloneDir
++}
++
++func (d *LocalClone) MainBranch() string {
++ return d.opts.Branch
++}
++
++func (d *LocalClone) RepositoryRef() gitprovider.RepositoryRef {
++ return d.repoRef
++}
++
++func (d *LocalClone) canWrite() bool {
++ return d.opts.AuthMethod != nil
++}
++
++// verifyRead makes sure it's ok to start a read-something-from-git process
++func (d *LocalClone) verifyRead() error {
++ // Safeguard against not starting yet
++ if d.wt == nil {
++ return fmt.Errorf("cannot pull: %w", ErrNotStarted)
++ }
++ return nil
++}
++
++// verifyWrite makes sure it's ok to start a write-something-to-git process
++func (d *LocalClone) verifyWrite() error {
++ // We need all read privileges first
++ if err := d.verifyRead(); err != nil {
++ return err
++ }
++ // Make sure we don't write to a possibly read-only repo
++ if !d.canWrite() {
++ return ErrCannotWriteToReadOnly
++ }
++ return nil
++}
++
++func (d *LocalClone) clone(ctx context.Context) error {
++ // Lock the mutex now that we're starting, and unlock it when exiting
++ d.lock.Lock()
++ defer d.lock.Unlock()
++
++ // AuthMethod may be nil in read-only mode; guard against calling a method on a
++ // nil interface, and default to HTTPS cloning in that case
++ transportType := gitprovider.TransportTypeHTTPS
++ if d.opts.AuthMethod != nil {
++ transportType = d.opts.AuthMethod.TransportType()
++ }
++ cloneURL := d.repoRef.GetCloneURL(transportType)
++
++ log.Infof("Starting to clone the repository %s", d.repoRef)
++ // Do a clone operation to the temporary directory
++ var err error
++ d.repo, err = git.PlainCloneContext(ctx, d.Dir(), false, &git.CloneOptions{
++ URL: cloneURL,
++ Auth: d.opts.AuthMethod,
++ ReferenceName: plumbing.NewBranchReferenceName(d.opts.Branch),
++ SingleBranch: true,
++ NoCheckout: false,
++ //Depth: 1, // ref: https://github.com/src-d/go-git/issues/1143
++ RecurseSubmodules: 0,
++ Progress: nil,
++ Tags: git.NoTags,
++ })
++ // Handle errors
++ if errors.Is(err, context.DeadlineExceeded) {
++ return fmt.Errorf("git clone operation timed out: %w", err)
++ } else if errors.Is(err, context.Canceled) {
++ return fmt.Errorf("git clone was cancelled: %w", err)
++ } else if err != nil {
++ return fmt.Errorf("git clone error: %v", err)
++ }
++
++ // Populate the worktree pointer
++ d.wt, err = d.repo.Worktree()
++ if err != nil {
++ return fmt.Errorf("git get worktree error: %v", err)
++ }
++
++ // Get the latest HEAD commit and report it to the user
++ ref, err := d.repo.Head()
++ if err != nil {
++ return err
++ }
++
++ log.Infof("Repo cloned; HEAD commit is %s", ref.Hash())
++ return nil
++}
++
++func (d *LocalClone) Pull(ctx context.Context) error {
++ // Lock the mutex now that we're starting, and unlock it when exiting
++ d.lock.Lock()
++ defer d.lock.Unlock()
++
++ // TODO: This should support doing Fetch() only maybe
++ // TODO: Remove the requirement to actually be on the branch
++ // that is being pulled.
++
++ // Make sure it's okay to read
++ if err := d.verifyRead(); err != nil {
++ return err
++ }
++
++ // Perform the git pull operation. The context carries a timeout
++ log.Trace("Starting pull operation")
++ err := d.wt.PullContext(ctx, &git.PullOptions{
++ Auth: d.opts.AuthMethod,
++ SingleBranch: true,
++ })
++
++ // Handle errors
++ if errors.Is(err, git.NoErrAlreadyUpToDate) {
++ // all good, nothing more to do
++ log.Trace("Pull already up-to-date")
++ return nil
++ } else if errors.Is(err, context.DeadlineExceeded) {
++ return fmt.Errorf("git pull operation timed out: %w", err)
++ } else if errors.Is(err, context.Canceled) {
++ return fmt.Errorf("git pull was cancelled: %w", err)
++ } else if err != nil {
++ return fmt.Errorf("git pull error: %v", err)
++ }
++
++ log.Trace("Pulled successfully")
++
++ // Get current HEAD
++ ref, err := d.repo.Head()
++ if err != nil {
++ return err
++ }
++
++ log.Infof("New commit observed %s", ref.Hash())
++ return nil
++}
++
++func (d *LocalClone) Push(ctx context.Context) error {
++ // TODO: Push a specific branch only. Use opts.RefSpecs?
++
++ // Lock the mutex now that we're starting, and unlock it when exiting
++ d.lock.Lock()
++ defer d.lock.Unlock()
++
++ // Make sure it's okay to write
++ if err := d.verifyWrite(); err != nil {
++ return err
++ }
++
++ // Perform the git push operation. The context carries a timeout
++ log.Debug("Starting push operation")
++ err := d.repo.PushContext(ctx, &git.PushOptions{
++ Auth: d.opts.AuthMethod,
++ })
++
++ // Handle errors
++ if errors.Is(err, git.NoErrAlreadyUpToDate) {
++ // TODO: Is it fine that there's nothing more to do, or should an empty push be treated as a failure?
++ log.Trace("Push already up-to-date")
++ return nil
++ } else if errors.Is(err, context.DeadlineExceeded) {
++ return fmt.Errorf("git push operation timed out: %w", err)
++ } else if errors.Is(err, context.Canceled) {
++ return fmt.Errorf("git push was cancelled: %w", err)
++ } else if err != nil {
++ return fmt.Errorf("git push error: %v", err)
++ }
++
++ log.Trace("Pushed successfully")
++
++ return nil
++}
++
++func (d *LocalClone) CreateBranch(_ context.Context, branch string) error {
++ // Lock the mutex now that we're starting, and unlock it when exiting
++ d.lock.Lock()
++ defer d.lock.Unlock()
++
++ // TODO: Should the caller do a force-reset using ResetToCleanBranch before creating the branch?
++
++ // Make sure it's okay to write
++ if err := d.verifyWrite(); err != nil {
++ return err
++ }
++
++ return d.wt.Checkout(&git.CheckoutOptions{
++ Branch: plumbing.NewBranchReferenceName(branch),
++ Create: true,
++ })
++}
++
++func (d *LocalClone) ResetToCleanBranch(_ context.Context, branch string) error {
++ // Lock the mutex now that we're starting, and unlock it when exiting
++ d.lock.Lock()
++ defer d.lock.Unlock()
++
++ // Make sure it's okay to write
++ if err := d.verifyWrite(); err != nil {
++ return err
++ }
++
++ // Best-effort clean
++ _ = d.wt.Clean(&git.CleanOptions{
++ Dir: true,
++ })
++ // Force-checkout the main branch
++ return d.wt.Checkout(&git.CheckoutOptions{
++ Branch: plumbing.NewBranchReferenceName(branch),
++ Force: true,
++ })
++ // TODO: Do a pull here too?
++}
++
++// Commit creates a commit of all changes in the current worktree with the given parameters.
++// Pushing the branch is not done here; it is handled separately (e.g. by a post-commit hook).
++// ErrNotStarted is returned if the repo hasn't been cloned yet.
++// ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided.
++func (d *LocalClone) Commit(ctx context.Context, commit transactional.Commit) error {
++ // Lock the mutex now that we're starting, and unlock it when exiting
++ d.lock.Lock()
++ defer d.lock.Unlock()
++
++ // Make sure it's okay to write
++ if err := d.verifyWrite(); err != nil {
++ return err
++ }
++
++ s, err := d.wt.Status()
++ if err != nil {
++ return fmt.Errorf("git status failed: %v", err)
++ }
++ if s.IsClean() {
++ log.Debugf("No changed files in git repo, nothing to commit...")
++ // TODO: Should this be an error instead?
++ return nil
++ }
++
++ // Do a commit
++ log.Debug("Committing all local changes")
++ hash, err := d.wt.Commit(commit.GetMessage().String(), &git.CommitOptions{
++ All: true,
++ Author: &object.Signature{
++ Name: commit.GetAuthor().GetName(),
++ Email: commit.GetAuthor().GetEmail(),
++ When: time.Now(),
++ },
++ })
++ if err != nil {
++ return fmt.Errorf("git commit error: %v", err)
++ }
++
++ // Log the new commit; the deferred unlock will allow writing again
++ log.Infof("A new commit has been created: %q", hash)
++ return nil
++}
++
++// Cleanup removes the temporary clone directory.
++func (d *LocalClone) Cleanup() error {
++ // Remove the temporary directory
++ if err := os.RemoveAll(d.Dir()); err != nil {
++ log.Errorf("Failed to clean up temp git directory: %v", err)
++ return err
++ }
++ return nil
++}
+diff --git a/pkg/storage/client/transactional/distributed/git/github/github.go b/pkg/storage/client/transactional/distributed/git/github/github.go
+new file mode 100644
+index 0000000..23a2012
+--- /dev/null
++++ b/pkg/storage/client/transactional/distributed/git/github/github.go
+@@ -0,0 +1,182 @@
++package github
++
++import (
++ "context"
++ "errors"
++ "fmt"
++
++ "github.com/fluxcd/go-git-providers/github"
++ "github.com/fluxcd/go-git-providers/gitprovider"
++ "github.com/fluxcd/go-git-providers/validation"
++ gogithub "github.com/google/go-github/v32/github"
++ "github.com/sirupsen/logrus"
++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
++)
++
++// PullRequest can be used as the Commit of a transaction instead of a plain Commit, if
++// a pull request should be created by the CommitHook returned from NewGitHubPRCommitHandler.
++type PullRequest interface {
++ // PullRequest is a superset of Commit
++ transactional.Commit
++
++ // GetLabels specifies what labels should be applied on the PR.
++ // +optional
++ GetLabels() []string
++ // GetAssignees specifies what user login names should be assigned to this PR.
++ // Note: Only users with "pull" access or more can be assigned.
++ // +optional
++ GetAssignees() []string
++ // GetMilestone specifies what milestone this should be attached to.
++ // +optional
++ GetMilestone() string
++}
++
++// GenericPullRequest implements PullRequest.
++var _ PullRequest = GenericPullRequest{}
++
++// GenericPullRequest is a generic, ready-to-use implementation of PullRequest.
++type GenericPullRequest struct {
++ // GenericPullRequest is a superset of a Commit.
++ transactional.Commit
++
++ // Labels specifies what labels should be applied on the PR.
++ // +optional
++ Labels []string
++ // Assignees specifies what user login names should be assigned to this PR.
++ // Note: Only users with "pull" access or more can be assigned.
++ // +optional
++ Assignees []string
++ // Milestone specifies what milestone this should be attached to.
++ // +optional
++ Milestone string
++}
++
++func (r GenericPullRequest) GetLabels() []string { return r.Labels }
++func (r GenericPullRequest) GetAssignees() []string { return r.Assignees }
++func (r GenericPullRequest) GetMilestone() string { return r.Milestone }
++
++func (r GenericPullRequest) Validate() error {
++ v := validation.New("GenericPullRequest")
++ // Just validate the "inner" object
++ v.Append(r.Commit.Validate(), r.Commit, "Commit")
++ return v.Error()
++}
++
++// TODO: This package should really only depend on go-git-providers' abstraction interface
++
++var ErrProviderNotSupported = errors.New("only the Github go-git-providers provider is supported at the moment")
++
++// NewGitHubPRCommitHandler returns a new transactional.CommitHook from a gitprovider.Client.
++func NewGitHubPRCommitHandler(c gitprovider.Client, repoRef gitprovider.RepositoryRef) (transactional.CommitHook, error) {
++ // Make sure a Github client was passed
++ if c.ProviderID() != github.ProviderID {
++ return nil, ErrProviderNotSupported
++ }
++ return &prCreator{c, repoRef}, nil
++}
++
++type prCreator struct {
++ c gitprovider.Client
++ repoRef gitprovider.RepositoryRef
++}
++
++func (c *prCreator) PreCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error {
++ return nil
++}
++
++func (c *prCreator) PostCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error {
++ // First, validate the input
++ if err := commit.Validate(); err != nil {
++ return fmt.Errorf("given transactional.Commit wasn't valid")
++ }
++
++ prCommit, ok := commit.(PullRequest)
++ if !ok {
++ return nil
++ }
++
++ // Use the "raw" go-github client to do this
++ ghClient := c.c.Raw().(*gogithub.Client)
++
++ // Helper variables
++ owner := c.repoRef.GetIdentity()
++ repo := c.repoRef.GetRepository()
++ var body *string
++ if commit.GetMessage().GetDescription() != "" {
++ body = gogithub.String(commit.GetMessage().GetDescription())
++ }
++
++ // Create the Pull Request
++ prPayload := &gogithub.NewPullRequest{
++ Head: gogithub.String(info.Head),
++ Base: gogithub.String(info.Base),
++ Title: gogithub.String(commit.GetMessage().GetTitle()),
++ Body: body,
++ }
++ logrus.Infof("GitHub PR payload: %+v", prPayload)
++ pr, _, err := ghClient.PullRequests.Create(ctx, owner, repo, prPayload)
++ if err != nil {
++ return err
++ }
++
++ // If prCommit.GetMilestone() is set, fetch the ID of the milestone
++ // Only set milestoneID to non-nil if specified
++ var milestoneID *int
++ if len(prCommit.GetMilestone()) != 0 {
++ milestoneID, err = getMilestoneID(ctx, ghClient, owner, repo, prCommit.GetMilestone())
++ if err != nil {
++ return err
++ }
++ }
++
++ // Only set assignees to non-nil if specified
++ var assignees *[]string
++ if a := prCommit.GetAssignees(); len(a) != 0 {
++ assignees = &a
++ }
++
++ // Only set labels to non-nil if specified
++ var labels *[]string
++ if l := prCommit.GetLabels(); len(l) != 0 {
++ labels = &l
++ }
++
++ // Only PATCH the PR if any of the fields were set
++ if milestoneID != nil || assignees != nil || labels != nil {
++ _, _, err := ghClient.Issues.Edit(ctx, owner, repo, pr.GetNumber(), &gogithub.IssueRequest{
++ Milestone: milestoneID,
++ Assignees: assignees,
++ Labels: labels,
++ })
++ if err != nil {
++ return err
++ }
++ }
++
++ return nil
++}
++
++func getMilestoneID(ctx context.Context, c *gogithub.Client, owner, repo, milestoneName string) (*int, error) {
++ // List all milestones in the repo
++ // TODO: This could/should use pagination
++ milestones, _, err := c.Issues.ListMilestones(ctx, owner, repo, &gogithub.MilestoneListOptions{
++ State: "all",
++ })
++ if err != nil {
++ return nil, err
++ }
++ // Loop through all milestones, search for one with the right name
++ for _, milestone := range milestones {
++ // Only consider a milestone with the right name
++ if milestone.GetTitle() != milestoneName {
++ continue
++ }
++ // Validate nil to avoid panics
++ if milestone.Number == nil {
++ return nil, fmt.Errorf("didn't expect milestone Number to be nil: %v", milestone)
++ }
++ // Return the Milestone number
++ return milestone.Number, nil
++ }
++ return nil, fmt.Errorf("couldn't find milestone with name: %s", milestoneName)
++}
+diff --git a/pkg/gitdir/transport.go b/pkg/storage/client/transactional/distributed/git/transport.go
+similarity index 97%
+rename from pkg/gitdir/transport.go
+rename to pkg/storage/client/transactional/distributed/git/transport.go
+index df2c325..3017853 100644
+--- a/pkg/gitdir/transport.go
++++ b/pkg/storage/client/transactional/distributed/git/transport.go
+@@ -1,10 +1,10 @@
+-package gitdir
++package git
+
+ import (
+ "errors"
+
+ "github.com/fluxcd/go-git-providers/gitprovider"
+- "github.com/fluxcd/toolkit/pkg/ssh/knownhosts"
++ "github.com/fluxcd/pkg/ssh/knownhosts"
+ "github.com/go-git/go-git/v5/plumbing/transport"
+ "github.com/go-git/go-git/v5/plumbing/transport/http"
+ "github.com/go-git/go-git/v5/plumbing/transport/ssh"
+diff --git a/pkg/storage/client/transactional/distributed/interfaces.go b/pkg/storage/client/transactional/distributed/interfaces.go
+new file mode 100644
+index 0000000..8110599
+--- /dev/null
++++ b/pkg/storage/client/transactional/distributed/interfaces.go
+@@ -0,0 +1,75 @@
++package distributed
++
++import (
++ "context"
++ "time"
++
++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
++)
++
++// Client is a client that can sync state with a remote in a transactional way.
++type Client interface {
++ // The distributed Client extends the transactional Client
++ transactional.Client
++ // This Client is itself both a CommitHook and TransactionHook; these should
++ // be automatically registered with the transactional.Client's BranchManager
++ // in this Client's constructor.
++ transactional.CommitHook
++ transactional.TransactionHook
++
++ // StartResyncLoop starts a resync loop for the given branches for
++ // the given interval.
++ //
++ // resyncCacheInterval specifies the interval at which resyncs
++ // (remote Pulls) should be run in the background. The duration
++ // must be positive (i.e. non-zero).
++ //
++ // resyncBranches specifies what branches to resync. The default is
++ // []string{""}, i.e. only the "default" branch.
++ //
++ // ctx should be used to cancel the loop, if needed.
++ //
++ // While it is technically possible to start many of these resync
++ // loops, it is not recommended. Start it once, for all the branches
++ // you need. The branches will be pulled synchronously in order. The
++ // resync interval is non-sliding, which means that the interval
++ // includes the time of the operations.
++ StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches ...string)
++
++ // Remote exposes the underlying remote used
++ Remote() Remote
++}
++
++type Remote interface {
++ // Push pushes the attached branch (of the ctx) to the remote.
++ // Push must block as long as the operation is in progress, but also
++ // respect the timeout set on ctx and return instantly after it expires.
++ //
++ // It is guaranteed that Pull() and Push() are never called racily at
++ // the same time for the same branch, BUT Pull() and Push() might be called
++ // at the same time in any order for distinct branches. If the underlying
++ // Remote transport only supports one "writer transport" to it at the same time,
++ // the Remote must coordinate pulls and pushes with a mutex internally.
++ Push(ctx context.Context) error
++
++ // Pull pulls the attached branch (of the ctx) from the remote.
++ // Pull must block as long as the operation is in progress, but also
++ // respect the timeout set on ctx and return instantly after it expires.
++ //
++ // It is guaranteed that Pull() and Push() are never called racily at
++ // the same time for the same branch, BUT Pull() and Push() might be called
++ // at the same time in any order for distinct branches. If the underlying
++ // Remote transport only supports one "writer transport" to it at the same time,
++ // the Remote must coordinate pulls and pushes with a mutex internally.
++ Pull(ctx context.Context) error
++}
++
++// LockableRemote describes a remote that supports locking a remote branch for writing.
++type LockableRemote interface {
++ Remote
++
++ // Lock locks the branch attached to the context for writing, for the given duration.
++ Lock(ctx context.Context, d time.Duration) error
++ // Unlock reverses the write lock created by Lock()
++ Unlock(ctx context.Context) error
++}
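++
++// A minimal no-op Remote sketch (illustrative only, e.g. for tests):
++//
++//   type noopRemote struct{}
++//
++//   func (noopRemote) Push(ctx context.Context) error { return nil }
++//   func (noopRemote) Pull(ctx context.Context) error { return nil }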
+diff --git a/pkg/storage/client/transactional/distributed/options.go b/pkg/storage/client/transactional/distributed/options.go
+new file mode 100644
+index 0000000..4640ce9
+--- /dev/null
++++ b/pkg/storage/client/transactional/distributed/options.go
+@@ -0,0 +1,97 @@
++package distributed
++
++import "time"
++
++// ClientOption is an interface for applying options to ClientOptions.
++type ClientOption interface {
++ ApplyToClient(*ClientOptions)
++}
++
++// ClientOptions specify options on how the distributed client should
++// act according to the PACELC theorem.
++//
++// The following configurations correspond to the PACELC levels:
++//
++// PC/EC: CacheValidDuration == 0 && RemoteErrorStream == nil:
++// This makes every read first do a remote Pull(), and fails
++// critically if the Pull operation fails. Transactions fail
++// if Push() fails.
++//
++// PC/EL: CacheValidDuration > 0 && RemoteErrorStream == nil:
++// This makes a read do a remote Pull only if the delta between
++// the last Pull and time.Now() exceeds CacheValidDuration.
++// StartResyncLoop(resyncCacheInterval) can be used to
++// periodically Pull in the background, so that the latency
++ // of reads is minimal. Transactions and reads fail if
++// Push() or Pull() fail.
++//
++// PA/EL: RemoteErrorStream != nil:
++// How often reads invoke Pull() is given by CacheValidDuration
++// and StartResyncLoop(resyncCacheInterval) as per above.
++// However, when a Pull() or Push() is invoked from a read or
++// transaction, and a network partition happens, such errors are
++// non-critical for the operation to succeed, as Availability is
++// favored and cached objects are returned.
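++//
++// An illustrative PA/EL configuration (assumes a transactional.Client "txc"
++// and a Remote "remote" constructed elsewhere):
++//
++//   errCh := make(chan error, 10)
++//   c, err := NewClient(txc, remote, &ClientOptions{
++//       CacheValidDuration: 30 * time.Second,
++//       RemoteErrorStream:  errCh,
++//   })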
++type ClientOptions struct {
++ // CacheValidDuration is the period of time the cache is still
++ // valid since its last resync (remote Pull). If set to 0, all
++ // reads will invoke a resync right before reading, as the cache
++ // is never considered valid. Setting this option to 0 favors
++ // Consistency over Availability.
++ //
++ // CacheValidDuration == 0 and RemoteErrorStream != nil must not
++ // both be set at the same time, as they contradict each other.
++ //
++ // Default: 1m
++ CacheValidDuration time.Duration
++ // RemoteErrorStream specifies a stream to which to redirect
++ // errors from the remote, instead of returning them to the caller.
++ // This is useful for allowing "offline operation", and favoring
++ // Availability over Consistency when a Partition happens (i.e.
++ // the network is unreachable). In normal operation, remote Push/Pull
++ // errors would propagate to the caller and "fail" the Transaction;
++ // however, if that is not desired, those errors can be redirected
++ // here, and the caller will succeed with the transaction.
++ // Default: nil (optional)
++ RemoteErrorStream chan error
++
++ // Default: 30s for all
++ LockTimeout time.Duration
++ PullTimeout time.Duration
++ PushTimeout time.Duration
++}
++
++func (o *ClientOptions) ApplyToClient(target *ClientOptions) {
++ // Note: CacheValidDuration == 0 (the PC/EC mode) cannot currently be set
++ // through ApplyToClient, as 0 is indistinguishable from "unset" here.
++ if o.CacheValidDuration != 0 {
++ target.CacheValidDuration = o.CacheValidDuration
++ }
++ if o.RemoteErrorStream != nil {
++ target.RemoteErrorStream = o.RemoteErrorStream
++ }
++ if o.LockTimeout != 0 {
++ target.LockTimeout = o.LockTimeout
++ }
++ if o.PullTimeout != 0 {
++ target.PullTimeout = o.PullTimeout
++ }
++ if o.PushTimeout != 0 {
++ target.PushTimeout = o.PushTimeout
++ }
++}
++
++func (o *ClientOptions) ApplyOptions(opts []ClientOption) *ClientOptions {
++ for _, opt := range opts {
++ opt.ApplyToClient(o)
++ }
++ return o
++}
++
++func defaultOptions() *ClientOptions {
++ return &ClientOptions{
++ CacheValidDuration: 1 * time.Minute,
++ RemoteErrorStream: nil,
++ LockTimeout: 30 * time.Second,
++ PullTimeout: 30 * time.Second,
++ PushTimeout: 30 * time.Second,
++ }
++}
+diff --git a/pkg/storage/client/transactional/handlers.go b/pkg/storage/client/transactional/handlers.go
+new file mode 100644
+index 0000000..aa438e3
+--- /dev/null
++++ b/pkg/storage/client/transactional/handlers.go
+@@ -0,0 +1,103 @@
++package transactional
++
++import "context"
++
++type TxInfo struct {
++ Base string
++ Head string
++ Options TxOptions
++}
++
++type CommitHookChain interface {
++ // The chain also itself implements CommitHook
++ CommitHook
++ // Register registers a new CommitHook to the chain
++ Register(CommitHook)
++}
++
++type CommitHook interface {
++ PreCommitHook(ctx context.Context, commit Commit, info TxInfo) error
++ PostCommitHook(ctx context.Context, commit Commit, info TxInfo) error
++}
++
++var _ CommitHookChain = &MultiCommitHook{}
++var _ CommitHook = &MultiCommitHook{}
++
++type MultiCommitHook struct {
++ CommitHooks []CommitHook
++}
++
++func (m *MultiCommitHook) Register(h CommitHook) {
++ m.CommitHooks = append(m.CommitHooks, h)
++}
++
++func (m *MultiCommitHook) PreCommitHook(ctx context.Context, commit Commit, info TxInfo) error {
++ for _, ch := range m.CommitHooks {
++ if ch == nil {
++ continue
++ }
++ if err := ch.PreCommitHook(ctx, commit, info); err != nil {
++ return err
++ }
++ }
++ return nil
++}
++
++func (m *MultiCommitHook) PostCommitHook(ctx context.Context, commit Commit, info TxInfo) error {
++ for _, ch := range m.CommitHooks {
++ if ch == nil {
++ continue
++ }
++ if err := ch.PostCommitHook(ctx, commit, info); err != nil {
++ return err
++ }
++ }
++ return nil
++}
++
++type TransactionHookChain interface {
++ // The chain also itself implements TransactionHook
++ TransactionHook
++ // Register registers a new TransactionHook to the chain
++ Register(TransactionHook)
++}
++
++type TransactionHook interface {
++ PreTransactionHook(ctx context.Context, info TxInfo) error
++ PostTransactionHook(ctx context.Context, info TxInfo) error
++}
++
++var _ TransactionHookChain = &MultiTransactionHook{}
++var _ TransactionHook = &MultiTransactionHook{}
++
++type MultiTransactionHook struct {
++ TransactionHooks []TransactionHook
++}
++
++func (m *MultiTransactionHook) Register(h TransactionHook) {
++ m.TransactionHooks = append(m.TransactionHooks, h)
++}
++
++func (m *MultiTransactionHook) PreTransactionHook(ctx context.Context, info TxInfo) error {
++ for _, th := range m.TransactionHooks {
++ if th == nil {
++ continue
++ }
++ if err := th.PreTransactionHook(ctx, info); err != nil {
++ return err
++ }
++ }
++ return nil
++}
++
++func (m *MultiTransactionHook) PostTransactionHook(ctx context.Context, info TxInfo) error {
++ for _, th := range m.TransactionHooks {
++ if th == nil {
++ continue
++ }
++ if err := th.PostTransactionHook(ctx, info); err != nil {
++ return err
++ }
++ }
++ return nil
++}
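++
++// An illustrative registration sketch (assumes "myHook" implements CommitHook):
++//
++//   chain := &MultiCommitHook{}
++//   chain.Register(myHook)
++//   // The chain itself can now be used wherever a CommitHook is expected.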
+diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go
+new file mode 100644
+index 0000000..7371f4c
+--- /dev/null
++++ b/pkg/storage/client/transactional/interfaces.go
+@@ -0,0 +1,82 @@
++package transactional
++
++import (
++ "context"
++
++ "github.com/weaveworks/libgitops/pkg/storage/client"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++)
++
++type Client interface {
++ client.Reader
++
++ BranchManager() BranchManager
++ BranchMerger() BranchMerger
++
++ Transaction(ctx context.Context, opts ...TxOption) Tx
++ BranchTransaction(ctx context.Context, branchName string, opts ...TxOption) BranchTx
++}
++
++type BranchManager interface {
++ CreateBranch(ctx context.Context, branch string) error
++ ResetToCleanBranch(ctx context.Context, branch string) error
++ Commit(ctx context.Context, commit Commit) error
++
++ // CommitHookChain must be non-nil, but can be a no-op
++ CommitHookChain() CommitHookChain
++ // TransactionHookChain must be non-nil, but can be a no-op
++ TransactionHookChain() TransactionHookChain
++}
++
++type BranchMerger interface {
++ MergeBranches(ctx context.Context, base, head string, commit Commit) error
++}
++
++type CustomTxFunc func(ctx context.Context) error
++
++type Tx interface {
++ Commit(Commit) error
++ Abort(err error) error
++
++ Client() client.Client
++
++ Custom(CustomTxFunc) Tx
++
++ Get(key core.ObjectKey, obj core.Object) Tx
++ List(list core.ObjectList, opts ...core.ListOption) Tx
++
++ Create(obj core.Object, opts ...core.CreateOption) Tx
++ Update(obj core.Object, opts ...core.UpdateOption) Tx
++ Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx
++ Delete(obj core.Object, opts ...core.DeleteOption) Tx
++ DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) Tx
++
++ UpdateStatus(obj core.Object, opts ...core.UpdateOption) Tx
++ PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx
++}
++
++type BranchTx interface {
++ CreateTx(Commit) BranchTxResult
++ Abort(err error) error
++
++ Client() client.Client
++
++ Custom(CustomTxFunc) BranchTx
++
++ Get(key core.ObjectKey, obj core.Object) BranchTx
++ List(list core.ObjectList, opts ...core.ListOption) BranchTx
++
++ Create(obj core.Object, opts ...core.CreateOption) BranchTx
++ Update(obj core.Object, opts ...core.UpdateOption) BranchTx
++ Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx
++ Delete(obj core.Object, opts ...core.DeleteOption) BranchTx
++ DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) BranchTx
++
++ UpdateStatus(obj core.Object, opts ...core.UpdateOption) BranchTx
++ PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx
++}
++
++type BranchTxResult interface {
++ Error() error
++ MergeWithBase(Commit) error
++}
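++
++// An illustrative sketch of the fluent transaction API (the client "c", the
++// object "car" and the commits are hypothetical):
++//
++//   err := c.BranchTransaction(ctx, "update-car").
++//       Get(core.ObjectKey{Name: "my-car"}, car).
++//       Update(car).
++//       CreateTx(commit).
++//       MergeWithBase(mergeCommit)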
+diff --git a/pkg/storage/client/transactional/options.go b/pkg/storage/client/transactional/options.go
+new file mode 100644
+index 0000000..6b3679c
+--- /dev/null
++++ b/pkg/storage/client/transactional/options.go
+@@ -0,0 +1,66 @@
++package transactional
++
++import "time"
++
++type TxOption interface {
++ ApplyToTx(*TxOptions)
++}
++
++var _ TxOption = &TxOptions{}
++
++func defaultTxOptions() *TxOptions {
++ return &TxOptions{
++ Timeout: 1 * time.Minute,
++ Mode: TxModeAtomic,
++ }
++}
++
++type TxOptions struct {
++ Timeout time.Duration
++ Mode TxMode
++}
++
++func (o *TxOptions) ApplyToTx(target *TxOptions) {
++ if o.Timeout != 0 {
++ target.Timeout = o.Timeout
++ }
++ if len(o.Mode) != 0 {
++ target.Mode = o.Mode
++ }
++}
++
++func (o *TxOptions) ApplyOptions(opts []TxOption) *TxOptions {
++ for _, opt := range opts {
++ opt.ApplyToTx(o)
++ }
++ return o
++}
++
++var _ TxOption = TxMode("")
++
++type TxMode string
++
++const (
++ // TxModeAtomic makes the transaction fully atomic, i.e. so
++ // that any read happening against the target branch during the
++ // lifetime of the transaction will be blocked until the completion
++ // of the transaction.
++ TxModeAtomic TxMode = "Atomic"
++ // TxModeAllowReading allows reads targeting the branch a
++ // transaction is executing against; however, until the
++ // transaction has completed, all such reads strictly return
++ // the data available prior to the transaction taking place.
++ TxModeAllowReading TxMode = "AllowReading"
++)
++
++func (m TxMode) ApplyToTx(target *TxOptions) {
++ target.Mode = m
++}
++
++var _ TxOption = TxTimeout(0)
++
++type TxTimeout time.Duration
++
++func (t TxTimeout) ApplyToTx(target *TxOptions) {
++ target.Timeout = time.Duration(t)
++}
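++
++// For example (illustrative; "c" is a hypothetical Client), options can be combined like:
++//
++//   tx := c.Transaction(ctx, TxModeAllowReading, TxTimeout(30*time.Second))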
+diff --git a/pkg/storage/client/transactional/tx.go b/pkg/storage/client/transactional/tx.go
+new file mode 100644
+index 0000000..30c6b6c
+--- /dev/null
++++ b/pkg/storage/client/transactional/tx.go
+@@ -0,0 +1,24 @@
++package transactional
++
++type txImpl struct {
++ *txCommon
++}
++
++func (tx *txImpl) Commit(c Commit) error {
++ // Run the operations, and try to create the commit
++ if err := tx.tryApplyAndCommitOperations(c); err != nil {
++ // If we failed with the transaction, abort directly
++ return tx.Abort(err)
++ }
++
++ // We successfully completed all the tasks needed
++ // Now, cleanup and unlock the branch
++ return tx.cleanupFunc()
++}
++
++func (tx *txImpl) Custom(op CustomTxFunc) Tx {
++ tx.ops = append(tx.ops, func() error {
++ return op(tx.ctx)
++ })
++ return tx
++}
+diff --git a/pkg/storage/client/transactional/tx_branch.go b/pkg/storage/client/transactional/tx_branch.go
+new file mode 100644
+index 0000000..c7011a3
+--- /dev/null
++++ b/pkg/storage/client/transactional/tx_branch.go
+@@ -0,0 +1,71 @@
++package transactional
++
++import (
++ "context"
++ "fmt"
++)
++
++type txBranchImpl struct {
++ *txCommon
++
++ merger BranchMerger
++}
++
++func (tx *txBranchImpl) CreateTx(c Commit) BranchTxResult {
++ // Run the operations, and try to create the commit
++ if err := tx.tryApplyAndCommitOperations(c); err != nil {
++ // If we failed with the transaction, abort directly, and
++ // return the error wrapped in a BranchTxResult
++ abortErr := tx.Abort(err)
++ return newErrTxResult(abortErr)
++ }
++
++ // We successfully completed all the tasks needed
++ // Now, cleanup and unlock the branch
++ cleanupErr := tx.cleanupFunc()
++
++ // Allow the merger to merge, if supported
++ return &txResultImpl{
++ err: cleanupErr,
++ ctx: tx.ctx,
++ merger: tx.merger,
++ baseBranch: tx.info.Base,
++ headBranch: tx.info.Head,
++ }
++}
++
++func (tx *txBranchImpl) Custom(op CustomTxFunc) BranchTx {
++ tx.ops = append(tx.ops, func() error {
++ return op(tx.ctx)
++ })
++ return tx
++}
++
++func newErrTxResult(err error) *txResultImpl {
++ return &txResultImpl{err: err}
++}
++
++type txResultImpl struct {
++ err error
++ ctx context.Context
++ merger BranchMerger
++ baseBranch string
++ headBranch string
++}
++
++func (r *txResultImpl) Error() error {
++ return r.err
++}
++
++func (r *txResultImpl) MergeWithBase(c Commit) error {
++ // If there is an internal error, return it
++ if r.err != nil {
++ return r.err
++ }
++ // Make sure we have a merger
++ if r.merger == nil {
++ return fmt.Errorf("TxResult: The BranchMerger is nil")
++ }
++ // Try to merge the branch
++ return r.merger.MergeBranches(r.ctx, r.baseBranch, r.headBranch, c)
++}
+diff --git a/pkg/storage/client/transactional/tx_common.go b/pkg/storage/client/transactional/tx_common.go
+new file mode 100644
+index 0000000..3448c81
+--- /dev/null
++++ b/pkg/storage/client/transactional/tx_common.go
+@@ -0,0 +1,70 @@
++package transactional
++
++import (
++ "context"
++
++ "github.com/weaveworks/libgitops/pkg/storage/client"
++ utilerrs "k8s.io/apimachinery/pkg/util/errors"
++)
++
++type txFunc func() error
++
++type txCommon struct {
++ err error
++ c client.Client
++ manager BranchManager
++ ctx context.Context
++ ops []txFunc
++ info TxInfo
++ cleanupFunc txFunc
++}
++
++func (tx *txCommon) Client() client.Client {
++ return tx.c
++}
++
++func (tx *txCommon) Abort(err error) error {
++ // Run the cleanup function and return an aggregate of the two possible errors
++ return utilerrs.NewAggregate([]error{
++ err,
++ tx.cleanupFunc(),
++ })
++}
++
++func (tx *txCommon) handlePreCommit(c Commit) txFunc {
++ return func() error {
++ return tx.manager.CommitHookChain().PreCommitHook(tx.ctx, c, tx.info)
++ }
++}
++
++func (tx *txCommon) commit(c Commit) txFunc {
++ return func() error {
++ return tx.manager.Commit(tx.ctx, c)
++ }
++}
++
++func (tx *txCommon) handlePostCommit(c Commit) txFunc {
++ return func() error {
++ return tx.manager.CommitHookChain().PostCommitHook(tx.ctx, c, tx.info)
++ }
++}
++
++func (tx *txCommon) tryApplyAndCommitOperations(c Commit) error {
++ // If an error occurred already before, just return it directly
++ if tx.err != nil {
++ return tx.err
++ }
++
++ // First, all registered client operations are run
++ // Then Pre-commit, commit, and post-commit functions are run
++ // If at any stage the context is cancelled, an error is returned
++ // immediately, and no more functions in the chain are run. The
++ // same goes for errors from any of the functions, the chain is
++ // immediately interrupted on errors.
++ return execTransactionsCtx(tx.ctx, append(
++ tx.ops,
++ tx.handlePreCommit(c),
++ tx.commit(c),
++ tx.handlePostCommit(c),
++ ))
++}
+diff --git a/pkg/storage/client/transactional/tx_ops.go b/pkg/storage/client/transactional/tx_ops.go
+new file mode 100644
+index 0000000..e0a6c37
+--- /dev/null
++++ b/pkg/storage/client/transactional/tx_ops.go
+@@ -0,0 +1,105 @@
++package transactional
++
++import (
++ "context"
++
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++)
++
++func (tx *txImpl) Get(key core.ObjectKey, obj core.Object) Tx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.Get(ctx, key, obj)
++ })
++}
++func (tx *txImpl) List(list core.ObjectList, opts ...core.ListOption) Tx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.List(ctx, list, opts...)
++ })
++}
++
++func (tx *txImpl) Create(obj core.Object, opts ...core.CreateOption) Tx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.Create(ctx, obj, opts...)
++ })
++}
++func (tx *txImpl) Update(obj core.Object, opts ...core.UpdateOption) Tx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.Update(ctx, obj, opts...)
++ })
++}
++func (tx *txImpl) Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.Patch(ctx, obj, patch, opts...)
++ })
++}
++func (tx *txImpl) Delete(obj core.Object, opts ...core.DeleteOption) Tx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.Delete(ctx, obj, opts...)
++ })
++}
++func (tx *txImpl) DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) Tx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.DeleteAllOf(ctx, obj, opts...)
++ })
++}
++
++func (tx *txImpl) UpdateStatus(obj core.Object, opts ...core.UpdateOption) Tx {
++ return tx.Custom(func(ctx context.Context) error {
++ return nil // TODO tx.c.Status().Update(ctx, obj, opts...)
++ })
++}
++func (tx *txImpl) PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx {
++ return tx.Custom(func(ctx context.Context) error {
++ return nil // TODO tx.c.Status().Patch(ctx, obj, patch, opts...)
++ })
++}
++
++// TODO
++
++func (tx *txBranchImpl) Get(key core.ObjectKey, obj core.Object) BranchTx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.Get(ctx, key, obj)
++ })
++}
++func (tx *txBranchImpl) List(list core.ObjectList, opts ...core.ListOption) BranchTx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.List(ctx, list, opts...)
++ })
++}
++
++func (tx *txBranchImpl) Create(obj core.Object, opts ...core.CreateOption) BranchTx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.Create(ctx, obj, opts...)
++ })
++}
++func (tx *txBranchImpl) Update(obj core.Object, opts ...core.UpdateOption) BranchTx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.Update(ctx, obj, opts...)
++ })
++}
++func (tx *txBranchImpl) Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.Patch(ctx, obj, patch, opts...)
++ })
++}
++func (tx *txBranchImpl) Delete(obj core.Object, opts ...core.DeleteOption) BranchTx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.Delete(ctx, obj, opts...)
++ })
++}
++func (tx *txBranchImpl) DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) BranchTx {
++ return tx.Custom(func(ctx context.Context) error {
++ return tx.c.DeleteAllOf(ctx, obj, opts...)
++ })
++}
++
++func (tx *txBranchImpl) UpdateStatus(obj core.Object, opts ...core.UpdateOption) BranchTx {
++ return tx.Custom(func(ctx context.Context) error {
++ return nil // TODO tx.c.Status().Update(ctx, obj, opts...)
++ })
++}
++func (tx *txBranchImpl) PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx {
++ return tx.Custom(func(ctx context.Context) error {
++ return nil // TODO tx.c.Status().Patch(ctx, obj, patch, opts...)
++ })
++}
+diff --git a/pkg/storage/client/transactional/utils.go b/pkg/storage/client/transactional/utils.go
+new file mode 100644
+index 0000000..4812266
+--- /dev/null
++++ b/pkg/storage/client/transactional/utils.go
+@@ -0,0 +1,21 @@
++package transactional
++
++import "context"
++
++// execTransactionsCtx executes the functions in order. Before each
++// function in the chain is run, the context is checked for errors
++// (e.g. if it has been cancelled or timed out). If a context error
++// is returned, or if a function in the chain returns an error, this
++// function returns directly, without executing the rest of the
++// functions in the chain.
++func execTransactionsCtx(ctx context.Context, funcs []txFunc) error {
++ for _, fn := range funcs {
++ if err := ctx.Err(); err != nil {
++ return err
++ }
++ if err := fn(); err != nil {
++ return err
++ }
++ }
++ return nil
++}
+diff --git a/pkg/storage/client/utils.go b/pkg/storage/client/utils.go
+new file mode 100644
+index 0000000..da86908
+--- /dev/null
++++ b/pkg/storage/client/utils.go
+@@ -0,0 +1,23 @@
++package client
++
++import (
++ "errors"
++ "fmt"
++
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "k8s.io/apimachinery/pkg/runtime"
++)
++
++var ErrNoMetadata = errors.New("it is required to embed ObjectMeta into the serialized API type")
++
++func NewObjectForGVK(gvk core.GroupVersionKind, scheme *runtime.Scheme) (core.Object, error) {
++ kobj, err := scheme.New(gvk)
++ if err != nil {
++ return nil, err
++ }
++ obj, ok := kobj.(core.Object)
++ if !ok {
++ return nil, fmt.Errorf("%w: %s", ErrNoMetadata, gvk)
++ }
++ return obj, nil
++}
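++
++// For example (illustrative; assumes a "Car" kind is registered with "scheme"):
++//
++//   gvk := core.GroupVersionKind{Group: "sample", Version: "v1alpha1", Kind: "Car"}
++//   obj, err := NewObjectForGVK(gvk, scheme)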
+diff --git a/pkg/storage/core/errors.go b/pkg/storage/core/errors.go
+new file mode 100644
+index 0000000..f65895a
+--- /dev/null
++++ b/pkg/storage/core/errors.go
+@@ -0,0 +1,50 @@
++package core
++
++import (
++ goerrors "errors"
++
++ "k8s.io/apimachinery/pkg/api/errors"
++ "k8s.io/apimachinery/pkg/runtime/schema"
++ "k8s.io/apimachinery/pkg/util/validation/field"
++)
++
++var (
++ // ErrNotImplemented can be returned for implementers that do not
++ // implement a specific part of an interface.
++ ErrNotImplemented = goerrors.New("not implemented")
++ // ErrInvalidParameter specifies that a given parameter
++ // (as a public struct field or function argument) was
++ // not valid according to the specification.
++ ErrInvalidParameter = goerrors.New("invalid parameter")
++)
++
++// StatusError is an error that supports also conversion
++// to a metav1.Status struct for more detailed information.
++type StatusError interface {
++ error
++ errors.APIStatus
++}
++
++func NewErrNotFound(id UnversionedObjectID) StatusError {
++ return errors.NewNotFound(schema.GroupResource{
++ Group: id.GroupKind().Group,
++ Resource: id.GroupKind().Kind,
++ }, id.ObjectKey().Name)
++}
++
++func NewErrAlreadyExists(id UnversionedObjectID) StatusError {
++ return errors.NewAlreadyExists(schema.GroupResource{
++ Group: id.GroupKind().Group,
++ Resource: id.GroupKind().Kind,
++ }, id.ObjectKey().Name)
++}
++
++func NewErrInvalid(id UnversionedObjectID, errs field.ErrorList) StatusError {
++ return errors.NewInvalid(id.GroupKind(), id.ObjectKey().Name, errs)
++}
++
++var (
++ IsErrNotFound = errors.IsNotFound
++ IsErrAlreadyExists = errors.IsAlreadyExists
++ IsErrInvalid = errors.IsInvalid
++)
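++
++// For example (illustrative; "doGet" is a hypothetical helper), callers can
++// branch on error kinds like:
++//
++//   if err := doGet(); IsErrNotFound(err) {
++//       // create the object instead
++//   }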
+diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go
+new file mode 100644
+index 0000000..b25cec3
+--- /dev/null
++++ b/pkg/storage/core/interfaces.go
+@@ -0,0 +1,86 @@
++package core
++
++import (
++ "context"
++
++ "k8s.io/apimachinery/pkg/runtime/schema"
++ "k8s.io/apimachinery/pkg/types"
++ "sigs.k8s.io/controller-runtime/pkg/client"
++)
++
++// Note: package core must not depend on any other parts of the libgitops repo, with the
++// serializer package as a possible exception. Anything under k8s.io/apimachinery is fine,
++// and important external imports like github.com/spf13/afero are also ok. The pretty large
++// sigs.k8s.io/controller-runtime import is a bit sub-optimal, though.
++
++// GroupVersionKind aliases
++type GroupKind = schema.GroupKind
++type GroupVersion = schema.GroupVersion
++type GroupVersionKind = schema.GroupVersionKind
++
++// Client-related Object aliases
++type Object = client.Object
++type ObjectKey = types.NamespacedName
++type ObjectList = client.ObjectList
++type Patch = client.Patch
++
++// Client-related Option aliases
++type ListOption = client.ListOption
++type CreateOption = client.CreateOption
++type UpdateOption = client.UpdateOption
++type PatchOption = client.PatchOption
++type DeleteOption = client.DeleteOption
++type DeleteAllOfOption = client.DeleteAllOfOption
++
++// Helper functions from client.
++var ObjectKeyFromObject = client.ObjectKeyFromObject
++
++// Namespacer is an interface that lets the caller know if a GroupKind is namespaced
++// or not. There are two ready-made implementations:
++// 1. RESTMapperToNamespacer
++// 2. NewStaticNamespacer
++type Namespacer interface {
++ // IsNamespaced returns true if the GroupKind is a namespaced type
++ IsNamespaced(gk schema.GroupKind) (bool, error)
++}
++
++// TODO: Investigate if the ObjectRecognizer should return unversioned
++// or versioned ObjectIDs
++type ObjectRecognizer interface {
++ ResolveObjectID(ctx context.Context, fileName string, content []byte) (ObjectID, error)
++}
++
++// UnversionedObjectID represents an ID for an Object whose version is not known.
++// However, the Group, Kind, Name and optionally, Namespace is known and should
++// uniquely identify the Object at a specific moment in time.
++type UnversionedObjectID interface {
++ GroupKind() GroupKind
++ ObjectKey() ObjectKey
++
++ WithVersion(version string) ObjectID
++}
++
++// ObjectID is a superset of UnversionedObjectID, that also specifies an exact version.
++type ObjectID interface {
++ UnversionedObjectID
++
++ GroupVersionKind() GroupVersionKind
++}
++
++// VersionRef is an interface that describes a reference to a specific version
++// of Objects in a Storage or Client.
++type VersionRef interface {
++ // String returns the commit or branch name.
++ String() string
++ // IsWritable determines if the VersionRef points to such a state where it
++ // is possible to write on top of it, i.e. as in the case of a Git branch.
++ //
++ // A specific Git commit, however, isn't considered writable, as it points
++ // to a specific point in time that can't just be rewritten (assuming this
++ // library is only additive, which it is).
++ IsWritable() bool
++ // IsZeroValue determines if this VersionRef is the "zero value", which means
++ // the user did not specify which version of the Object to get; the caller
++ // should decide how to handle that.
++ IsZeroValue() bool
++}
+diff --git a/pkg/storage/core/namespaces.go b/pkg/storage/core/namespaces.go
+new file mode 100644
+index 0000000..d0929f5
+--- /dev/null
++++ b/pkg/storage/core/namespaces.go
+@@ -0,0 +1,37 @@
++package core
++
++import (
++ "k8s.io/apimachinery/pkg/runtime/schema"
++)
++
++// StaticNamespacer implements Namespacer
++var _ Namespacer = StaticNamespacer{}
++
++// StaticNamespacer has a default policy, which is that objects are in general namespaced
++// (NamespacedIsDefaultPolicy == true), or that they are in general root-scoped
++// (NamespacedIsDefaultPolicy == false).
++//
++// To the default policy, Exceptions can be added, so that for that GroupKind, the default
++// policy is reversed.
++type StaticNamespacer struct {
++ NamespacedIsDefaultPolicy bool
++ Exceptions []schema.GroupKind
++}
++
++func (n StaticNamespacer) IsNamespaced(gk schema.GroupKind) (bool, error) {
++ if n.NamespacedIsDefaultPolicy {
++ // namespaced by default; the Exceptions list contains the root-scoped kinds
++ return !n.gkIsException(gk), nil
++ }
++ // root-scoped by default; the kinds in the Exceptions list are namespaced
++ return n.gkIsException(gk), nil
++}
++
++func (n StaticNamespacer) gkIsException(target schema.GroupKind) bool {
++ for _, gk := range n.Exceptions {
++ if gk == target {
++ return true
++ }
++ }
++ return false
++}
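++
++// A minimal usage sketch of StaticNamespacer (the GroupKinds below are
++// hypothetical, shown for illustration only):
++//
++//  ns := StaticNamespacer{
++//      NamespacedIsDefaultPolicy: true,
++//      Exceptions:                []schema.GroupKind{{Group: "", Kind: "Node"}},
++//  }
++//  nodeOK, _ := ns.IsNamespaced(schema.GroupKind{Group: "", Kind: "Node"})     // false (exception)
++//  carOK, _ := ns.IsNamespaced(schema.GroupKind{Group: "sample", Kind: "Car"}) // true (default policy)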
+diff --git a/pkg/storage/core/objectid.go b/pkg/storage/core/objectid.go
+new file mode 100644
+index 0000000..8dc747b
+--- /dev/null
++++ b/pkg/storage/core/objectid.go
+@@ -0,0 +1,29 @@
++package core
++
++import "k8s.io/apimachinery/pkg/runtime/schema"
++
++// NewUnversionedObjectID creates a new UnversionedObjectID from the given GroupKind and ObjectKey.
++func NewUnversionedObjectID(gk GroupKind, key ObjectKey) UnversionedObjectID {
++ return unversionedObjectID{gk, key}
++}
++
++type unversionedObjectID struct {
++ gk GroupKind
++ key ObjectKey
++}
++
++func (o unversionedObjectID) GroupKind() GroupKind { return o.gk }
++func (o unversionedObjectID) ObjectKey() ObjectKey { return o.key }
++func (o unversionedObjectID) WithVersion(version string) ObjectID { return objectID{o, version} }
++
++// NewObjectID creates a new ObjectID from the given GroupVersionKind and ObjectKey.
++func NewObjectID(gvk GroupVersionKind, key ObjectKey) ObjectID {
++ return objectID{unversionedObjectID{gvk.GroupKind(), key}, gvk.Version}
++}
++
++type objectID struct {
++ unversionedObjectID
++ version string
++}
++
++func (o objectID) GroupVersionKind() schema.GroupVersionKind { return o.gk.WithVersion(o.version) }
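++
++// A minimal usage sketch (the GVK and key are hypothetical):
++//
++//  gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
++//  id := NewObjectID(gvk, ObjectKey{Namespace: "default", Name: "my-app"})
++//  id.GroupVersionKind() // == gvk
++//  id.ObjectKey()        // == ObjectKey{Namespace: "default", Name: "my-app"}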
+diff --git a/pkg/storage/core/recognizer.go b/pkg/storage/core/recognizer.go
+new file mode 100644
+index 0000000..fac0fe1
+--- /dev/null
++++ b/pkg/storage/core/recognizer.go
+@@ -0,0 +1,58 @@
++package core
++
++import (
++ "context"
++ "errors"
++ "fmt"
++
++ "github.com/weaveworks/libgitops/pkg/serializer"
++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
++)
++
++// SerializerObjectRecognizer implements ObjectRecognizer.
++var _ ObjectRecognizer = &SerializerObjectRecognizer{}
++
++// SerializerObjectRecognizer is a simple implementation of ObjectRecognizer, that
++// decodes the given byte content with the assumption that it is YAML (which covers
++// both YAML and JSON formats) into a *metav1.PartialObjectMetadata, which allows
++// extracting the ObjectID from any Kubernetes API Machinery-compatible Object.
++//
++// This operation works even though *metav1.PartialObjectMetadata is not registered
++// with the underlying Scheme in any way.
++type SerializerObjectRecognizer struct {
++ // Serializer is a required field in order for ResolveObjectID to function.
++ Serializer serializer.Serializer
++ // AllowUnrecognized controls whether this implementation allows recognizing
++ // GVK combinations not known to the underlying Scheme. Default: false
++ AllowUnrecognized bool
++}
++
++func (r *SerializerObjectRecognizer) ResolveObjectID(_ context.Context, _ string, content []byte) (ObjectID, error) {
++ if r.Serializer == nil {
++ return nil, errors.New("programmer error: SerializerObjectRecognizer.Serializer is nil")
++ }
++ metaObj := &metav1.PartialObjectMetadata{}
++ err := r.Serializer.Decoder().DecodeInto(
++ serializer.NewSingleFrameReader(content, serializer.ContentTypeYAML),
++ metaObj,
++ )
++ if err != nil {
++ return nil, err
++ }
++ // Validate the object info
++ gvk := metaObj.GroupVersionKind()
++ if gvk.Group == "" && gvk.Version == "" {
++ return nil, fmt.Errorf(".apiVersion field must not be empty")
++ }
++ if gvk.Kind == "" {
++ return nil, fmt.Errorf(".kind field must not be empty")
++ }
++ if metaObj.Name == "" {
++ return nil, fmt.Errorf(".metadata.name field must not be empty")
++ }
++ if !r.AllowUnrecognized && !r.Serializer.Scheme().Recognizes(gvk) {
++ return nil, fmt.Errorf("GroupVersionKind %v not recognized by the scheme", gvk)
++ }
++
++ return NewObjectID(metaObj.GroupVersionKind(), ObjectKeyFromObject(metaObj)), nil
++}
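++
++// A minimal usage sketch (assumes a Serializer constructed from a populated
++// scheme, e.g. via serializer.NewSerializer as in the sample apps; the YAML
++// literal is illustrative only):
++//
++//  r := &SerializerObjectRecognizer{Serializer: mySerializer}
++//  id, err := r.ResolveObjectID(ctx, "",
++//      []byte("apiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: my-cm"))
++//  // on success, id.GroupVersionKind() is v1 ConfigMap and id.ObjectKey().Name is "my-cm"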
+diff --git a/pkg/storage/core/versionref.go b/pkg/storage/core/versionref.go
+new file mode 100644
+index 0000000..c9b3892
+--- /dev/null
++++ b/pkg/storage/core/versionref.go
+@@ -0,0 +1,80 @@
++package core
++
++import (
++ "context"
++ "errors"
++)
++
++var versionRefKey = versionRefKeyImpl{}
++
++type versionRefKeyImpl struct{}
++
++// WithVersionRef attaches the given VersionRef to a Context (it
++// overwrites if one already exists in ctx). The key for the ref
++// is private in this package, so one must use this function to
++// register it.
++func WithVersionRef(ctx context.Context, ref VersionRef) context.Context {
++ return context.WithValue(ctx, versionRefKey, ref)
++}
++
++// GetVersionRef returns the VersionRef attached to this context.
++// If there is no attached VersionRef, or it is nil, a BranchRef
++// with branch "" will be returned as the "zero value" of VersionRef.
++func GetVersionRef(ctx context.Context) VersionRef {
++ r, ok := ctx.Value(versionRefKey).(VersionRef)
++ // Return default ref if none specified
++ if r == nil || !ok {
++ return NewBranchRef("")
++ }
++ return r
++}
++
++var ErrInvalidVersionRefType = errors.New("invalid version ref type")
++
++// NewBranchRef creates a new VersionRef for a given branch. It is
++// valid for the branch to be ""; in this case it means the "zero
++// value", or unspecified branch to be more precise, where the caller
++// can choose how to handle.
++func NewBranchRef(branch string) VersionRef { return branchRef{branch} }
++
++// NewCommitRef creates a new VersionRef for the given commit. The
++// commit must uniquely define a certain revision precisely. It must
++// not be an empty string.
++func NewCommitRef(commit string) (VersionRef, error) {
++ if len(commit) == 0 {
++ return nil, errors.New("commit must not be an empty string")
++ }
++ return commitRef{commit}, nil
++}
++
++// MustNewCommitRef runs NewCommitRef, but panics on errors
++func MustNewCommitRef(commit string) VersionRef {
++ ref, err := NewCommitRef(commit)
++ if err != nil {
++ panic(err)
++ }
++ return ref
++}
++
++type branchRef struct{ branch string }
++
++func (r branchRef) String() string { return r.branch }
++
++// A branch is considered writable, as commits can be added to it by libgitops
++func (branchRef) IsWritable() bool { return true }
++
++// A branch is considered the zero value if the branch is an empty string,
++// which it is e.g. when there was no VersionRef associated with a Context.
++func (r branchRef) IsZeroValue() bool { return r.branch == "" }
++
++type commitRef struct{ commit string }
++
++func (r commitRef) String() string { return r.commit }
++
++// A commit is not considered writable, as it is only a read snapshot of
++// a specific point in time.
++func (commitRef) IsWritable() bool { return false }
++
++// IsZeroValue should always return false for commits, as the commit is
++// required to be a non-empty string.
++func (r commitRef) IsZeroValue() bool { return r.commit == "" }
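++
++// A minimal usage sketch (branch and commit names are hypothetical):
++//
++//  ctx := WithVersionRef(context.Background(), NewBranchRef("main"))
++//  GetVersionRef(ctx).String()     // "main"
++//  GetVersionRef(ctx).IsWritable() // true; commits can be added on top of a branch
++//
++//  ref := MustNewCommitRef("d0929f5")
++//  ref.IsWritable() // false; a commit is a read-only snapshot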
+diff --git a/pkg/storage/event/event.go b/pkg/storage/event/event.go
+new file mode 100644
+index 0000000..3f57fdb
+--- /dev/null
++++ b/pkg/storage/event/event.go
+@@ -0,0 +1,48 @@
++package event
++
++import (
++ "fmt"
++
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++)
++
++// ObjectEventType is an enum describing a change in an Object's state.
++type ObjectEventType byte
++
++var _ fmt.Stringer = ObjectEventType(0)
++
++const (
++ ObjectEventNone ObjectEventType = iota // 0
++ ObjectEventCreate // 1
++ ObjectEventUpdate // 2
++ ObjectEventDelete // 3
++ ObjectEventSync // 4
++)
++
++func (o ObjectEventType) String() string {
++ switch o {
++ case ObjectEventNone:
++ return "NONE"
++ case ObjectEventCreate:
++ return "CREATE"
++ case ObjectEventUpdate:
++ return "UPDATE"
++ case ObjectEventDelete:
++ return "DELETE"
++ case ObjectEventSync:
++ return "SYNC"
++ }
++
++ // Should never happen
++ return "UNKNOWN"
++}
++
++// ObjectEvent describes a change that has been observed
++// for the given object with the given ID.
++type ObjectEvent struct {
++ ID core.UnversionedObjectID
++ Type ObjectEventType
++}
++
++// ObjectEventStream is a channel of ObjectEvents
++type ObjectEventStream chan *ObjectEvent
+diff --git a/pkg/storage/event/interfaces.go b/pkg/storage/event/interfaces.go
+new file mode 100644
+index 0000000..b13c186
+--- /dev/null
++++ b/pkg/storage/event/interfaces.go
+@@ -0,0 +1,31 @@
++package event
++
++import (
++ "context"
++ "io"
++
++ "github.com/weaveworks/libgitops/pkg/storage"
++)
++
++// StorageCommon contains the methods that EventStorage adds to the
++// normal Storage.
++type StorageCommon interface {
++ // WatchForObjectEvents starts feeding ObjectEvents into the given "into"
++ // channel. The caller is responsible for setting a channel buffering
++ // limit large enough to not block normal operation. An error might
++ // be returned if a maximum amount of watches has been opened already,
++ // e.g. ErrTooManyWatches.
++ WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error
++
++ // Close closes the EventStorage and underlying resources gracefully.
++ io.Closer
++}
++
++// EventStorage is the abstract combination of a normal Storage, and
++// the possibility to listen for changes to objects as they happen.
++// TODO: Maybe we could use some of controller-runtime's built-in functionality
++// for watching for changes?
++type EventStorage interface {
++ storage.Storage
++ StorageCommon
++}
+diff --git a/pkg/storage/filesystem/dir_traversal.go b/pkg/storage/filesystem/dir_traversal.go
+new file mode 100644
+index 0000000..12284d7
+--- /dev/null
++++ b/pkg/storage/filesystem/dir_traversal.go
+@@ -0,0 +1,37 @@
++package filesystem
++
++import (
++ "context"
++ "os"
++)
++
++// ListValidFilesInFilesystem discovers files in the given Filesystem that have a
++// ContentType recognized by contentTyper, and whose paths are not excluded by
++// pathExcluder.
++func ListValidFilesInFilesystem(ctx context.Context, fs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder) (files []string, err error) {
++ err = fs.Walk(ctx, "", func(path string, info os.FileInfo, err error) error {
++ if err != nil {
++ return err
++ }
++
++ // Only include valid files
++ if !info.IsDir() && IsValidFileInFilesystem(ctx, fs, contentTyper, pathExcluder, path) {
++ files = append(files, path)
++ }
++ return nil
++ })
++ return
++}
++
++// IsValidFileInFilesystem checks if file (a relative path) has a ContentType
++// recognized by contentTyper, and is not excluded by pathExcluder.
++func IsValidFileInFilesystem(ctx context.Context, fs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder, file string) bool {
++ // return false if this path should be excluded
++ if pathExcluder.ShouldExcludePath(file) {
++ return false
++ }
++
++ // If the content type is valid for this path, err == nil => return true
++ _, err := contentTyper.ContentTypeForPath(ctx, fs, file)
++ return err == nil
++}
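++
++// A minimal usage sketch (assumes fs is a Filesystem scoped at some root
++// directory; DefaultContentTyper and DefaultPathExcluders are the defaults
++// defined in this package):
++//
++//  files, err := ListValidFilesInFilesystem(ctx, fs, DefaultContentTyper, DefaultPathExcluders())
++//  // files now holds the relative paths of all .json/.yaml/.yml files outside e.g. .git/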
+diff --git a/pkg/storage/filesystem/fileevents/events.go b/pkg/storage/filesystem/fileevents/events.go
+new file mode 100644
+index 0000000..38c385a
+--- /dev/null
++++ b/pkg/storage/filesystem/fileevents/events.go
+@@ -0,0 +1,36 @@
++package fileevents
++
++// FileEventType is an enum describing a change in a file's state
++type FileEventType byte
++
++const (
++ FileEventNone FileEventType = iota // 0
++ FileEventModify // 1
++ FileEventDelete // 2
++ FileEventMove // 3
++)
++
++func (e FileEventType) String() string {
++ switch e {
++ case FileEventNone:
++ return "NONE"
++ case FileEventModify:
++ return "MODIFY"
++ case FileEventDelete:
++ return "DELETE"
++ case FileEventMove:
++ return "MOVE"
++ }
++
++ return "UNKNOWN"
++}
++
++// FileEvent describes a file change of a certain kind at a certain
++// (relative) path. Often emitted by FileEventsEmitter.
++type FileEvent struct {
++ Path string
++ Type FileEventType
++}
++
++// FileEventStream is a channel of FileEvents
++type FileEventStream chan *FileEvent
+diff --git a/pkg/util/watcher/filewatcher.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go
+similarity index 52%
+rename from pkg/util/watcher/filewatcher.go
+rename to pkg/storage/filesystem/fileevents/inotify/filewatcher.go
+index 67db335..58d8518 100644
+--- a/pkg/util/watcher/filewatcher.go
++++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go
+@@ -1,46 +1,26 @@
+-package watcher
++package inotify
+
+ import (
++ "context"
+ "fmt"
+- "path"
++ "path/filepath"
++ gosync "sync"
+ "time"
+
+ "github.com/rjeczalik/notify"
++ "github.com/sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents"
+ "github.com/weaveworks/libgitops/pkg/util/sync"
+ "golang.org/x/sys/unix"
++ "k8s.io/apimachinery/pkg/util/sets"
+ )
+
+-const eventBuffer = 4096 // How many events and updates we can buffer before watching is interrupted
+ var listenEvents = []notify.Event{notify.InDelete, notify.InCloseWrite, notify.InMovedFrom, notify.InMovedTo}
+
+-var eventMap = map[notify.Event]FileEvent{
+- notify.InDelete: FileEventDelete,
+- notify.InCloseWrite: FileEventModify,
+-}
+-
+-// combinedEvent describes multiple events that should be concatenated into a single event
+-type combinedEvent struct {
+- input []notify.Event // input is a slice of events to match (in bytes, it speeds up the comparison)
+- output int // output is the event's index that should be returned, negative values equal nil
+-}
+-
+-func (c *combinedEvent) match(events notifyEvents) (notify.EventInfo, bool) {
+- if len(c.input) > len(events) {
+- return nil, false // Not enough events, cannot match
+- }
+-
+- for i := 0; i < len(c.input); i++ {
+- if events[i].Event() != c.input[i] {
+- return nil, false
+- }
+- }
+-
+- if c.output > 0 {
+- return events[c.output], true
+- }
+-
+- return nil, true
++var eventMap = map[notify.Event]fileevents.FileEventType{
++ notify.InDelete: fileevents.FileEventDelete,
++ notify.InCloseWrite: fileevents.FileEventModify,
+ }
+
+ // combinedEvents describes the event combinations to concatenate,
+@@ -54,99 +34,113 @@ var combinedEvents = []combinedEvent{
+
+ type notifyEvents []notify.EventInfo
+ type eventStream chan notify.EventInfo
+-type FileUpdateStream chan *FileUpdate
+-
+-// Options specifies options for the FileWatcher
+-type Options struct {
+- // ExcludeDirs specifies what directories to not watch
+- ExcludeDirs []string
+- // BatchTimeout specifies the duration to wait after last event before dispatching grouped inotify events
+- BatchTimeout time.Duration
+- // ValidExtensions specifies what file extensions to look at
+- ValidExtensions []string
+-}
+
+-// DefaultOptions returns the default options
+-func DefaultOptions() Options {
+- return Options{
+- ExcludeDirs: []string{".git"},
+- BatchTimeout: 1 * time.Second,
+- ValidExtensions: []string{".yaml", ".yml", ".json"},
+- }
+-}
++// FileEvents is a slice of FileEvent pointers
++type FileEvents []*fileevents.FileEvent
+
+ // NewFileWatcher returns a list of files in the watched directory in
+ // addition to the generated FileWatcher, it can be used to populate
+ // MappedRawStorage fileMappings
+-func NewFileWatcher(dir string) (w *FileWatcher, files []string, err error) {
+- return NewFileWatcherWithOptions(dir, DefaultOptions())
+-}
++func NewFileWatcher(dir string, opts ...FileWatcherOption) (fileevents.Emitter, error) {
++ o := defaultOptions().ApplyOptions(opts)
+
+-// NewFileWatcher returns a list of files in the watched directory in
+-// addition to the generated FileWatcher, it can be used to populate
+-// MappedRawStorage fileMappings
+-func NewFileWatcherWithOptions(dir string, opts Options) (w *FileWatcher, files []string, err error) {
+- w = &FileWatcher{
+- dir: dir,
+- events: make(eventStream, eventBuffer),
+- updates: make(FileUpdateStream, eventBuffer),
+- batcher: sync.NewBatchWriter(opts.BatchTimeout),
+- opts: opts,
++ w := &FileWatcher{
++ dir: dir,
++
++ inbound: make(eventStream, int(o.EventBufferSize)),
++ // outbound is set by WatchForFileEvents
++ outboundMu: &gosync.Mutex{},
++
++ suspendFiles: sets.NewString(),
++ suspendFilesMu: &gosync.Mutex{},
++
++ // monitor and dispatcher set by WatchForFileEvents, guarded by outboundMu
++
++ opts: *o,
++
++ batcher: sync.NewBatchWriter(o.BatchTimeout),
+ }
+
+ log.Tracef("FileWatcher: Starting recursive watch for %q", dir)
+- if err = notify.Watch(path.Join(dir, "..."), w.events, listenEvents...); err != nil {
+- notify.Stop(w.events)
+- } else if files, err = w.getFiles(); err == nil {
+- w.monitor = sync.RunMonitor(w.monitorFunc)
+- w.dispatcher = sync.RunMonitor(w.dispatchFunc)
++ if err := notify.Watch(filepath.Join(dir, "..."), w.inbound, listenEvents...); err != nil {
++ notify.Stop(w.inbound)
++ return nil, err
+ }
+
+- return
++ return w, nil
+ }
+
++var _ fileevents.Emitter = &FileWatcher{}
++
+ // FileWatcher recursively monitors changes in files in the given directory
+ // and sends out events based on their state changes. Only files conforming
+ // to validSuffix are monitored. The FileWatcher can be suspended for a single
+ // event at a time to eliminate updates by WatchStorage causing a loop.
+ type FileWatcher struct {
+- dir string
+- events eventStream
+- updates FileUpdateStream
+- suspendEvent FileEvent
+- monitor *sync.Monitor
+- dispatcher *sync.Monitor
+- opts Options
++ dir string
++ // channels
++ inbound eventStream
++ outbound fileevents.FileEventStream
++ outboundMu *gosync.Mutex
++ // new suspend logic
++ suspendFiles sets.String
++ suspendFilesMu *gosync.Mutex
++ // goroutines
++ monitor *sync.Monitor
++ dispatcher *sync.Monitor
++
++ // opts
++ opts FileWatcherOptions
+ // the batcher is used for properly sending many concurrent inotify events
+ // as a group, after a specified timeout. This fixes the issue of one single
+ // file operation being registered as many different inotify events
+ batcher *sync.BatchWriter
+ }
+
+-func (w *FileWatcher) monitorFunc() {
++func (w *FileWatcher) WatchForFileEvents(ctx context.Context, into fileevents.FileEventStream) error {
++ w.outboundMu.Lock()
++ defer w.outboundMu.Unlock()
++ // We don't support more than one listener
++ // TODO: maybe support many listeners in the future?
++ if w.outbound != nil {
++ return fmt.Errorf("FileWatcher: not more than one watch supported: %w", fileevents.ErrTooManyWatches)
++ }
++ w.outbound = into
++ // Start the backing goroutines
++ w.monitor = sync.RunMonitor(w.monitorFunc)
++ w.dispatcher = sync.RunMonitor(w.dispatchFunc)
++ return nil // all ok
++}
++
++func (w *FileWatcher) monitorFunc() error {
+ log.Debug("FileWatcher: Monitoring thread started")
+ defer log.Debug("FileWatcher: Monitoring thread stopped")
+- defer close(w.updates) // Close the update stream after the FileWatcher has stopped
++ defer close(w.outbound) // Close the update stream after the FileWatcher has stopped
+
+ for {
+- event, ok := <-w.events
++ event, ok := <-w.inbound
+ if !ok {
+- return
++ logrus.Debug("FileWatcher: Got non-ok channel recieve from w.inbound, exiting monitorFunc")
++ return nil
+ }
+
+ if ievent(event).Mask&unix.IN_ISDIR != 0 {
+ continue // Skip directories
+ }
+
+- if !w.validFile(event.Path()) {
+- continue // Skip invalid files
++ // Get the relative path between the root directory and the changed file
++ // Note: This is only used for the PathExcluder; the underlying file-change
++ // computation uses absolute paths, which are converted into relative paths
++ // in sendUpdate before being sent to the listener.
++ relativePath, err := filepath.Rel(w.dir, event.Path())
++ if err != nil {
++ logrus.Errorf("FileWatcher: Error computing relative path between %s and %s: %v", w.dir, event.Path(), err)
++ continue
+ }
+
+- updateEvent := convertEvent(event.Event())
+- if w.suspendEvent > 0 && updateEvent == w.suspendEvent {
+- w.suspendEvent = 0
+- log.Debugf("FileWatcher: Skipping suspended event %s for path: %q", updateEvent, event.Path())
+- continue // Skip the suspended event
++ // The PathExcluder only operates on relative paths.
++ if w.opts.PathExcluder.ShouldExcludePath(relativePath) {
++ continue // Skip ignored files
+ }
+
+ // Get any events registered for the specific file, and append the specified event
+@@ -158,18 +152,20 @@ func (w *FileWatcher) monitorFunc() {
+ eventList = append(eventList, event)
+
+ // Register the event in the map, and dispatch all the events at once after the timeout
++ // Note that event.Path() is just the unique key for the map here; it is not
++ // actually used later when computing the filesystem changes.
+ w.batcher.Store(event.Path(), eventList)
+ log.Debugf("FileWatcher: Registered inotify events %v for path %q", eventList, event.Path())
+ }
+ }
+
+-func (w *FileWatcher) dispatchFunc() {
++func (w *FileWatcher) dispatchFunc() error {
+ log.Debug("FileWatcher: Dispatch thread started")
+ defer log.Debug("FileWatcher: Dispatch thread stopped")
+
+ for {
+ // Wait until we have a batch dispatched to us
+- ok := w.batcher.ProcessBatch(func(key, val interface{}) bool {
++ ok := w.batcher.ProcessBatch(func(_, val interface{}) bool {
+ // Concatenate all known events, and dispatch them to be handled one by one
+ for _, event := range w.concatenateEvents(val.(notifyEvents)) {
+ w.sendUpdate(event)
+@@ -179,56 +175,85 @@ func (w *FileWatcher) dispatchFunc() {
+ return true
+ })
+ if !ok {
+- return // The BatchWriter channel is closed, stop processing
++ logrus.Debug("FileWatcher: Got non-ok channel receive from w.batcher, exiting dispatchFunc")
++ return nil // The BatchWriter channel is closed, stop processing
+ }
+
+ log.Debug("FileWatcher: Dispatched events batch and reset the events cache")
+ }
+ }
+
+-func (w *FileWatcher) sendUpdate(update *FileUpdate) {
+- log.Debugf("FileWatcher: Sending update: %s -> %q", update.Event, update.Path)
+- w.updates <- update
+-}
++func (w *FileWatcher) sendUpdate(event *fileevents.FileEvent) {
++ // Get the relative path between the root directory and the changed file
++ relativePath, err := filepath.Rel(w.dir, event.Path)
++ if err != nil {
++ logrus.Errorf("FileWatcher: Error computing relative path between %s and %s: %v", w.dir, event.Path, err)
++ return
++ }
++ // Replace the full path with the relative path for the signaling upstream
++ event.Path = relativePath
+
+-// GetFileUpdateStream gets the channel with FileUpdates
+-func (w *FileWatcher) GetFileUpdateStream() FileUpdateStream {
+- return w.updates
++ if w.shouldSuspendEvent(event.Path) {
++ log.Debugf("FileWatcher: Skipping suspended event %s for path: %q", event.Type, event.Path)
++ return // Skip the suspended event
++ }
++
++ log.Debugf("FileWatcher: Sending update: %s -> %q", event.Type, event.Path)
++ w.outbound <- event
+ }
+
+ // Close closes active underlying resources
+-func (w *FileWatcher) Close() {
+- notify.Stop(w.events)
++func (w *FileWatcher) Close() error {
++ notify.Stop(w.inbound)
+ w.batcher.Close()
+- close(w.events) // Close the event stream
+- w.monitor.Wait()
+- w.dispatcher.Wait()
++ close(w.inbound) // Close the inbound event stream
++ // No need to check the error here, as we only return nil above
++ _ = w.monitor.Wait()
++ _ = w.dispatcher.Wait()
++ return nil
+ }
+
+-// Suspend enables a one-time suspend of the given event,
+-// the FileWatcher will skip the given event once
+-func (w *FileWatcher) Suspend(updateEvent FileEvent) {
+- w.suspendEvent = updateEvent
++// Suspend enables a one-time suspend of the given path
++// TODO: clarify how the path should be formatted
++func (w *FileWatcher) Suspend(_ context.Context, path string) {
++ w.suspendFilesMu.Lock()
++ defer w.suspendFilesMu.Unlock()
++ w.suspendFiles.Insert(path)
+ }
+
+-func convertEvent(event notify.Event) FileEvent {
++// shouldSuspendEvent checks if an event for the given path
++// should be suspended once. If so, true is returned and the
++// path is removed from the suspend set.
++func (w *FileWatcher) shouldSuspendEvent(path string) bool {
++ w.suspendFilesMu.Lock()
++ defer w.suspendFilesMu.Unlock()
++ // If the path should not be suspended, just return false and be done
++ if !w.suspendFiles.Has(path) {
++ return false
++ }
++ // Otherwise, remove it from the list and mark it as suspended
++ w.suspendFiles.Delete(path)
++ return true
++}
++
++func convertEvent(event notify.Event) fileevents.FileEventType {
+ if updateEvent, ok := eventMap[event]; ok {
+ return updateEvent
+ }
+
+- return FileEventNone
++ return fileevents.FileEventNone
+ }
+
+-func convertUpdate(event notify.EventInfo) *FileUpdate {
++func convertUpdate(event notify.EventInfo) *fileevents.FileEvent {
+ fileEvent := convertEvent(event.Event())
+- if fileEvent == FileEventNone {
++ if fileEvent == fileevents.FileEventNone {
+ // This should never happen
+ panic(fmt.Sprintf("invalid event for update conversion: %q", event.Event().String()))
+ }
+
+- return &FileUpdate{
+- Event: fileEvent,
+- Path: event.Path(),
++ return &fileevents.FileEvent{
++ Path: event.Path(),
++ Type: fileEvent,
+ }
+ }
+
+@@ -247,7 +272,7 @@ func (w *FileWatcher) newMoveCache(event notify.EventInfo) *moveCache {
+ }
+
+ // moveCaches wait one second to be cancelled before firing
+- m.timer = time.AfterFunc(time.Second, m.incomplete)
++ m.timer = time.AfterFunc(w.opts.BatchTimeout, m.incomplete)
+ return m
+ }
+
+@@ -260,42 +285,53 @@ func (m *moveCache) cookie() uint32 {
+ // if only one is received, the file is moved in/out of a watched directory, which
+ // is treated as a normal creation/deletion by this method.
+ func (m *moveCache) incomplete() {
+- var event FileEvent
++ var evType fileevents.FileEventType
+
+ switch m.event.Event() {
+ case notify.InMovedFrom:
+- event = FileEventDelete
++ evType = fileevents.FileEventDelete
+ case notify.InMovedTo:
+- event = FileEventModify
++ evType = fileevents.FileEventModify
+ default:
+ // This should never happen
+ panic(fmt.Sprintf("moveCache: unrecognized event: %v", m.event.Event()))
+ }
+
+ log.Tracef("moveCache: Timer expired for %d, dispatching...", m.cookie())
+- m.watcher.sendUpdate(&FileUpdate{event, m.event.Path()})
++ m.watcher.sendUpdate(&fileevents.FileEvent{Path: m.event.Path(), Type: evType})
+
+ // Delete the cache after the timer has fired
++ moveCachesMu.Lock()
+ delete(moveCaches, m.cookie())
++ moveCachesMu.Unlock()
+ }
+
+ func (m *moveCache) cancel() {
+ m.timer.Stop()
++ moveCachesMu.Lock()
+ delete(moveCaches, m.cookie())
++ moveCachesMu.Unlock()
+ log.Tracef("moveCache: Dispatching cancelled for %d", m.cookie())
+ }
+
+-// moveCaches keeps track of active moves by cookie
+-var moveCaches = make(map[uint32]*moveCache)
++var (
++ // moveCaches keeps track of active moves by cookie
++ moveCaches = make(map[uint32]*moveCache)
++ moveCachesMu = &gosync.RWMutex{}
++)
+
+ // move processes InMovedFrom and InMovedTo events in any order
+ // and dispatches FileUpdates when a move is detected
+-func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *FileUpdate) {
++func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *fileevents.FileEvent) {
+ cookie := ievent(event).Cookie
++ moveCachesMu.RLock()
+ cache, ok := moveCaches[cookie]
++ moveCachesMu.RUnlock()
+ if !ok {
+ // The cookie is not cached, create a new cache object for it
++ moveCachesMu.Lock()
+ moveCaches[cookie] = w.newMoveCache(event)
++ moveCachesMu.Unlock()
+ return
+ }
+
+@@ -305,8 +341,8 @@ func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *FileUpdate) {
+ sourcePath, destPath = destPath, sourcePath
+ fallthrough
+ case notify.InMovedTo:
+- cache.cancel() // Cancel dispatching the cache's incomplete move
+- moveUpdate = &FileUpdate{FileEventMove, destPath} // Register an internal, complete move instead
++ cache.cancel() // Cancel dispatching the cache's incomplete move
++ moveUpdate = &fileevents.FileEvent{Path: destPath, Type: fileevents.FileEventMove} // Register an internal, complete move instead
+ log.Tracef("FileWatcher: Detected move: %q -> %q", sourcePath, destPath)
+ }
+
+@@ -315,8 +351,8 @@ func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *FileUpdate) {
+
+ // concatenateEvents takes in a slice of events and concatenates
+ // all events possible based on combinedEvents. It also manages
+-// file moving and conversion from notifyEvents to FileUpdates
+-func (w *FileWatcher) concatenateEvents(events notifyEvents) FileUpdates {
++// file moving and conversion from notifyEvents to FileEvents
++func (w *FileWatcher) concatenateEvents(events notifyEvents) FileEvents {
+ for _, combinedEvent := range combinedEvents {
+ // Test if the prefix of the given events matches combinedEvent.input
+ if event, ok := combinedEvent.match(events); ok {
+@@ -332,7 +368,7 @@ func (w *FileWatcher) concatenateEvents(events notifyEvents) FileUpdates {
+ }
+
+ // Convert the events to updates
+- updates := make(FileUpdates, 0, len(events))
++ updates := make(FileEvents, 0, len(events))
+ for _, event := range events {
+ switch event.Event() {
+ case notify.InMovedFrom, notify.InMovedTo:
+@@ -352,3 +388,27 @@ func (w *FileWatcher) concatenateEvents(events notifyEvents) FileUpdates {
+ func ievent(event notify.EventInfo) *unix.InotifyEvent {
+ return event.Sys().(*unix.InotifyEvent)
+ }
++
++// combinedEvent describes multiple events that should be concatenated into a single event
++type combinedEvent struct {
++ input []notify.Event // input is a slice of events to match (in bytes, it speeds up the comparison)
++ output int // output is the event's index that should be returned, negative values equal nil
++}
++
++func (c *combinedEvent) match(events notifyEvents) (notify.EventInfo, bool) {
++ if len(c.input) > len(events) {
++ return nil, false // Not enough events, cannot match
++ }
++
++ for i := 0; i < len(c.input); i++ {
++ if events[i].Event() != c.input[i] {
++ return nil, false
++ }
++ }
++
++ if c.output > 0 {
++ return events[c.output], true
++ }
++
++ return nil, true
++}
+diff --git a/pkg/util/watcher/filewatcher_test.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go
+similarity index 60%
+rename from pkg/util/watcher/filewatcher_test.go
+rename to pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go
+index b80f9b2..c423f24 100644
+--- a/pkg/util/watcher/filewatcher_test.go
++++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go
+@@ -1,9 +1,12 @@
+-package watcher
++package inotify
+
+ import (
++ "fmt"
++ "strings"
+ "testing"
+
+ "github.com/rjeczalik/notify"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents"
+ "golang.org/x/sys/unix"
+ )
+
+@@ -51,33 +54,33 @@ var testEvents = []notifyEvents{
+ },
+ }
+
+-var targets = []FileEvents{
++var targets = []FileEventTypes{
+ {
+- FileEventModify,
++ fileevents.FileEventModify,
+ },
+ {
+- FileEventDelete,
++ fileevents.FileEventDelete,
+ },
+ {
+- FileEventModify,
+- FileEventMove,
+- FileEventDelete,
++ fileevents.FileEventModify,
++ fileevents.FileEventMove,
++ fileevents.FileEventDelete,
+ },
+ {
+- FileEventModify,
++ fileevents.FileEventModify,
+ },
+ {},
+ }
+
+-func extractEvents(updates FileUpdates) (events FileEvents) {
+- for _, update := range updates {
+- events = append(events, update.Event)
++func extractEventTypes(events FileEvents) (eventTypes FileEventTypes) {
++ for _, event := range events {
++ eventTypes = append(eventTypes, event.Type)
+ }
+
+ return
+ }
+
+-func eventsEqual(a, b FileEvents) bool {
++func eventsEqual(a, b FileEventTypes) bool {
+ if len(a) != len(b) {
+ return false
+ }
+@@ -91,9 +94,23 @@ func eventsEqual(a, b FileEvents) bool {
+ return true
+ }
+
++// FileEventTypes is a slice of FileEventType
++type FileEventTypes []fileevents.FileEventType
++
++var _ fmt.Stringer = FileEventTypes{}
++
++func (e FileEventTypes) String() string {
++ strs := make([]string, 0, len(e))
++ for _, ev := range e {
++ strs = append(strs, ev.String())
++ }
++
++ return strings.Join(strs, ",")
++}
++
+ func TestEventConcatenation(t *testing.T) {
+ for i, e := range testEvents {
+- result := extractEvents((&FileWatcher{}).concatenateEvents(e))
++ result := extractEventTypes((&FileWatcher{}).concatenateEvents(e))
+ if !eventsEqual(result, targets[i]) {
+ t.Errorf("wrong concatenation result: %v != %v", result, targets[i])
+ }
+diff --git a/pkg/storage/filesystem/fileevents/inotify/options.go b/pkg/storage/filesystem/fileevents/inotify/options.go
+new file mode 100644
+index 0000000..2c48e5d
+--- /dev/null
++++ b/pkg/storage/filesystem/fileevents/inotify/options.go
+@@ -0,0 +1,59 @@
++package inotify
++
++import (
++ "time"
++
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
++)
++
++// How many inotify events we can buffer before watching is interrupted
++const DefaultEventBufferSize int32 = 4096
++
++type FileWatcherOption interface {
++ ApplyToFileWatcher(*FileWatcherOptions)
++}
++
++var _ FileWatcherOption = &FileWatcherOptions{}
++
++// FileWatcherOptions specifies options for the FileWatcher
++type FileWatcherOptions struct {
++ // BatchTimeout specifies the duration to wait after the last event
++ // before dispatching grouped inotify events
++ // Default: 1s
++ BatchTimeout time.Duration
++ // EventBufferSize describes how many inotify events can be buffered
++ // before watching is interrupted/delayed.
++ // Default: DefaultEventBufferSize
++ EventBufferSize int32
++ // PathExcluder provides a way to exclude paths.
++ // Default: filesystem.DefaultPathExcluders()
++ PathExcluder filesystem.PathExcluder
++}
++
++func (o *FileWatcherOptions) ApplyToFileWatcher(target *FileWatcherOptions) {
++ if o.BatchTimeout != 0 {
++ target.BatchTimeout = o.BatchTimeout
++ }
++ if o.EventBufferSize != 0 {
++ target.EventBufferSize = o.EventBufferSize
++ }
++ if o.PathExcluder != nil {
++ target.PathExcluder = o.PathExcluder
++ }
++}
++
++func (o *FileWatcherOptions) ApplyOptions(opts []FileWatcherOption) *FileWatcherOptions {
++ for _, opt := range opts {
++ opt.ApplyToFileWatcher(o)
++ }
++ return o
++}
++
++// defaultOptions returns the default options
++func defaultOptions() *FileWatcherOptions {
++ return &FileWatcherOptions{
++ BatchTimeout: 1 * time.Second,
++ EventBufferSize: DefaultEventBufferSize,
++ PathExcluder: filesystem.DefaultPathExcluders(),
++ }
++}
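++
++// A minimal usage sketch (the directory is hypothetical; *FileWatcherOptions
++// itself implements FileWatcherOption, so it can be passed directly):
++//
++//  emitter, err := NewFileWatcher("/var/lib/gitops/repo", &FileWatcherOptions{
++//      BatchTimeout: 2 * time.Second,
++//  })
++//  // fields left as zero values (EventBufferSize, PathExcluder) keep their defaults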
+diff --git a/pkg/storage/filesystem/fileevents/interfaces.go b/pkg/storage/filesystem/fileevents/interfaces.go
+new file mode 100644
+index 0000000..77d7708
+--- /dev/null
++++ b/pkg/storage/filesystem/fileevents/interfaces.go
+@@ -0,0 +1,57 @@
++package fileevents
++
++import (
++ "context"
++ "errors"
++ "io"
++
++ "github.com/weaveworks/libgitops/pkg/storage/event"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
++)
++
++var (
++ // ErrTooManyWatches can happen when trying to register too many
++ // watching receiver channels with an event emitter.
++ ErrTooManyWatches = errors.New("too many watches already opened")
++)
++
++// Emitter is an interface that provides high-level inotify-like
++// behaviour to consumers. It can be used e.g. by even higher-level
++// interfaces like FilesystemEventStorage.
++type Emitter interface {
++ // WatchForFileEvents starts feeding FileEvents into the given "into"
++ // channel. The caller is responsible for setting a channel buffering
++ // limit large enough to not block normal operation. An error might
++ // be returned if the maximum number of watches has already been opened,
++ // e.g. ErrTooManyWatches.
++ //
++ // Note that it is the receiver's responsibility to "validate" the
++ // file so that it matches any user-defined policy (e.g. only specific
++ // content types, or paths not excluded by a given PathExcluder).
++ WatchForFileEvents(ctx context.Context, into FileEventStream) error
++
++ // Suspend blocks the next event dispatch for this given path. Useful
++ // for not sending "your own" modification events into the
++ // FileEventStream that is listening. path is relative.
++ Suspend(ctx context.Context, path string)
++
++ // Close closes the emitter gracefully.
++ io.Closer
++}
++
++// StorageCommon is an extension to event.StorageCommon that
++// also contains an underlying Emitter. This is meant to be
++// used in tandem with filesystem.Storages.
++type StorageCommon interface {
++ event.StorageCommon
++
++ // FileEventsEmitter gets the Emitter used internally.
++ FileEventsEmitter() Emitter
++}
++
++// FilesystemEventStorage is the combination of a filesystem.Storage,
++// and the possibility to listen for object updates from an Emitter.
++type FilesystemEventStorage interface {
++ filesystem.Storage
++ StorageCommon
++}
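++
++// A minimal consumer sketch (assumes emitter is any Emitter, e.g. the
++// inotify-backed FileWatcher; the buffer size is illustrative):
++//
++//  events := make(FileEventStream, 4096)
++//  if err := emitter.WatchForFileEvents(ctx, events); err != nil {
++//      // handle e.g. ErrTooManyWatches
++//  }
++//  for ev := range events {
++//      fmt.Printf("%s: %s\n", ev.Type, ev.Path)
++//  }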
+diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go
+new file mode 100644
+index 0000000..e0e6940
+--- /dev/null
++++ b/pkg/storage/filesystem/filefinder_simple.go
+@@ -0,0 +1,235 @@
++package filesystem
++
++import (
++ "context"
++ "errors"
++ "fmt"
++ "os"
++ "path/filepath"
++ "strings"
++
++ "github.com/weaveworks/libgitops/pkg/serializer"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "k8s.io/apimachinery/pkg/util/sets"
++)
++
++// NewSimpleStorage is a default opinionated constructor for a Storage
++// using SimpleFileFinder as the FileFinder, and the local disk as target.
++// If you need more advanced customizability than provided here, you can compose
++// the call to filesystem.NewGeneric yourself.
++func NewSimpleStorage(dir string, namespacer core.Namespacer, opts SimpleFileFinderOptions) (Storage, error) {
++ fs := NewOSFilesystem(dir)
++ fileFinder, err := NewSimpleFileFinder(fs, opts)
++ if err != nil {
++ return nil, err
++ }
++ // fileFinder and namespacer are validated by filesystem.NewGeneric.
++ return NewGeneric(fileFinder, namespacer)
++}
++
++func NewSimpleFileFinder(fs Filesystem, opts SimpleFileFinderOptions) (*SimpleFileFinder, error) {
++ if fs == nil {
++ return nil, fmt.Errorf("NewSimpleFileFinder: fs is mandatory")
++ }
++ ct := serializer.ContentTypeJSON
++ if len(opts.ContentType) != 0 {
++ ct = opts.ContentType
++ }
++ resolver := DefaultFileExtensionResolver
++ if opts.FileExtensionResolver != nil {
++ resolver = opts.FileExtensionResolver
++ }
++ return &SimpleFileFinder{
++ fs: fs,
++ opts: opts,
++ contentTyper: StaticContentTyper{ContentType: ct},
++ resolver: resolver,
++ }, nil
++}
++
++// isObjectIDNamespaced returns true if the ID is of a namespaced GroupKind, and
++// false if the GroupKind is non-namespaced. NOTE: This ONLY works for FileFinders
++// where the Storage has made sure that the namespacing conventions are followed.
++func isObjectIDNamespaced(id core.UnversionedObjectID) bool {
++ return id.ObjectKey().Namespace != ""
++}
++
++var _ FileFinder = &SimpleFileFinder{}
++
++// SimpleFileFinder is a FileFinder-compliant implementation that
++// stores Objects on disk using a straightforward directory layout.
++//
++// The following directory layout is used:
++// if DisableGroupDirectory == false && SubDirectoryFileName == "" {
++// /<group>/<kind>/<namespace>/<name>.<ext> if namespaced or
++// /<group>/<kind>/<name>.<ext> if non-namespaced
++// }
++// else if DisableGroupDirectory == false && SubDirectoryFileName == "foo" {
++// /<group>/<kind>/<namespace>/<name>/foo.<ext> if namespaced or
++// /<group>/<kind>/<name>/foo.<ext> if non-namespaced
++// }
++// else if DisableGroupDirectory == true && SubDirectoryFileName == "" {
++// /<kind>/<namespace>/<name>.<ext> if namespaced or
++// /<kind>/<name>.<ext> if non-namespaced
++// }
++// else if DisableGroupDirectory == true && SubDirectoryFileName == "foo" {
++// /<kind>/<namespace>/<name>/foo.<ext> if namespaced or
++// /<kind>/<name>/foo.<ext> if non-namespaced
++// }
++//
++// <ext> is resolved by the FileExtensionResolver, for the given ContentType.
++//
++// This FileFinder does not support the ObjectAt method.
++type SimpleFileFinder struct {
++ fs Filesystem
++ opts SimpleFileFinderOptions
++ contentTyper StaticContentTyper
++ resolver FileExtensionResolver
++}
++
++type SimpleFileFinderOptions struct {
++ // Default: false; means enable group directory
++ DisableGroupDirectory bool
++ // Default: ""; means use file names as the means of storage
++ SubDirectoryFileName string
++ // Default: serializer.ContentTypeJSON
++ ContentType serializer.ContentType
++ // Default: DefaultFileExtensionResolver
++ FileExtensionResolver FileExtensionResolver
++}
++
++// TODO: Use group name "core" if group is "" to support core k8s objects.
++
++func (f *SimpleFileFinder) Filesystem() Filesystem {
++ return f.fs
++}
++
++func (f *SimpleFileFinder) ContentTyper() ContentTyper {
++ return f.contentTyper
++}
++
++// ObjectPath gets the file path relative to the root directory
++func (f *SimpleFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) {
++ // /<kindpath>/
++ paths := []string{f.kindKeyPath(id.GroupKind())}
++
++ if isObjectIDNamespaced(id) {
++ // ./<namespace>/
++ paths = append(paths, id.ObjectKey().Namespace)
++ }
++ // Get the file extension
++ ext, err := f.ext()
++ if err != nil {
++ return "", err
++ }
++ if f.opts.SubDirectoryFileName == "" {
++ // ./<name>.<ext>
++ paths = append(paths, id.ObjectKey().Name+ext)
++ } else {
++ // ./<name>/<SubDirectoryFileName>.<ext>
++ paths = append(paths, id.ObjectKey().Name, f.opts.SubDirectoryFileName+ext)
++ }
++ return filepath.Join(paths...), nil
++}
++
++func (f *SimpleFileFinder) kindKeyPath(gk core.GroupKind) string {
++ if f.opts.DisableGroupDirectory {
++ // ./<kind>/
++ return filepath.Join(gk.Kind)
++ }
++ // ./<group>/<kind>/
++ return filepath.Join(gk.Group, gk.Kind)
++}
++
++// ObjectAt retrieves the ID based on the given relative file path.
++// Note: SimpleFileFinder does not support this method; it always returns an error.
++func (f *SimpleFileFinder) ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) {
++ return nil, errors.New("not implemented")
++}
++
++func (f *SimpleFileFinder) ext() (string, error) {
++ return f.resolver.ExtensionForContentType(f.contentTyper.ContentType)
++}
++
++// ListNamespaces lists the available namespaces for the given GroupKind.
++// This function shall only be called for namespaced objects, it is up to
++// the caller to make sure they do not call this method for root-spaced
++// objects. If any of the given rules are violated, ErrNamespacedMismatch
++// should be returned as a wrapped error.
++//
++// The implementer can choose between basing the answer strictly on e.g.
++// v1.Namespace objects that exist in the system, or just the set of
++// different namespaces that have been set on any object belonging to
++// the given GroupKind.
++func (f *SimpleFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) {
++ entries, err := readDir(ctx, f.fs, f.kindKeyPath(gk))
++ if err != nil {
++ return nil, err
++ }
++ return sets.NewString(entries...), nil
++}
++
++// ListObjectIDs returns a list of unversioned ObjectIDs.
++// For namespaced GroupKinds, the caller must provide a namespace, and for
++// root-spaced GroupKinds, the caller must not. When namespaced, this function
++// must only return object IDs for that given namespace. If any of the given
++// rules are violated, ErrNamespacedMismatch should be returned as a wrapped error.
++func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) {
++ // If namespace is empty, the names will be in ./<kindpath>, otherwise ./<kindpath>/<namespace>
++ namesDir := filepath.Join(f.kindKeyPath(gk), namespace)
++ entries, err := readDir(ctx, f.fs, namesDir)
++ if err != nil {
++ return nil, err
++ }
++ // Get the file extension
++ ext, err := f.ext()
++ if err != nil {
++ return nil, err
++ }
++ // Map the names to UnversionedObjectIDs
++ ids := make([]core.UnversionedObjectID, 0, len(entries))
++ for _, entry := range entries {
++ // Loop through all entries, and make sure they are sanitized .metadata.name's
++ if f.opts.SubDirectoryFileName != "" {
++ // If f.SubDirectoryFileName != "", the file names already match .metadata.name
++ // Make sure the metadata file ./<.metadata.name>/<SubDirectoryFileName>.<ext> actually exists
++ expectedPath := filepath.Join(namesDir, entry, f.opts.SubDirectoryFileName+ext)
++ if exists, _ := f.fs.Exists(ctx, expectedPath); !exists {
++ continue
++ }
++ } else {
++ // Storage path is ./<name>.<ext>. entry is "<name>.<ext>"
++ // Verify the extension is there and strip it from name. If ext isn't there, just continue
++ if !strings.HasSuffix(entry, ext) {
++ continue
++ }
++ // Remove the extension from the name
++ entry = strings.TrimSuffix(entry, ext)
++ }
++ // If we got this far, add the key to the list
++ ids = append(ids, core.NewUnversionedObjectID(gk, core.ObjectKey{Name: entry, Namespace: namespace}))
++ }
++ return ids, nil
++}
++
++func readDir(ctx context.Context, fs Filesystem, dir string) ([]string, error) {
++ fi, err := fs.Stat(ctx, dir)
++ if os.IsNotExist(err) {
++ // It's ok if the directory doesn't exist (yet), we just don't have any items then :)
++ return nil, nil
++ } else if err != nil {
++ // Propagate any other error from Stat
++ return nil, err
++ } else if !fi.IsDir() {
++ // Unexpected: the path exists but is not a directory
++ return nil, fmt.Errorf("expected that %s is a directory", dir)
++ }
++
++ // When we know that path is a directory, go ahead and read it
++ entries, err := fs.ReadDir(ctx, dir)
++ if err != nil {
++ return nil, err
++ }
++ fileNames := make([]string, 0, len(entries))
++ for _, entry := range entries {
++ fileNames = append(fileNames, entry.Name())
++ }
++ return fileNames, nil
++}
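++
++// A minimal usage sketch (the GroupKind and key are hypothetical; with the
++// default options, i.e. JSON content and the group directory enabled):
++//
++//  ff, _ := NewSimpleFileFinder(NewOSFilesystem("/data"), SimpleFileFinderOptions{})
++//  path, _ := ff.ObjectPath(ctx, core.NewUnversionedObjectID(
++//      core.GroupKind{Group: "sample.libgitops.weave.works", Kind: "Car"},
++//      core.ObjectKey{Namespace: "default", Name: "car1"},
++//  ))
++//  // path == "sample.libgitops.weave.works/Car/default/car1.json"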
+diff --git a/pkg/storage/filesystem/filesystem.go b/pkg/storage/filesystem/filesystem.go
+new file mode 100644
+index 0000000..f523e7b
+--- /dev/null
++++ b/pkg/storage/filesystem/filesystem.go
+@@ -0,0 +1,128 @@
++package filesystem
++
++import (
++ "context"
++ "os"
++ "path/filepath"
++ "strconv"
++
++ "github.com/spf13/afero"
++)
++
++// Filesystem extends afero.Fs and afero.Afero with contexts added to every method.
++type Filesystem interface {
++
++ // Members of afero.Fs
++
++ // MkdirAll creates a directory path and all parents that do not exist
++ // yet.
++ MkdirAll(ctx context.Context, path string, perm os.FileMode) error
++ // Remove removes a file identified by name, returning an error, if any
++ // happens.
++ Remove(ctx context.Context, name string) error
++ // Stat returns a FileInfo describing the named file, or an error, if any
++ // happens.
++ Stat(ctx context.Context, name string) (os.FileInfo, error)
++
++ // Members of afero.Afero
++
++ ReadDir(ctx context.Context, dirname string) ([]os.FileInfo, error)
++
++ Exists(ctx context.Context, path string) (bool, error)
++
++ ReadFile(ctx context.Context, filename string) ([]byte, error)
++
++ WriteFile(ctx context.Context, filename string, data []byte, perm os.FileMode) error
++
++ Walk(ctx context.Context, root string, walkFn filepath.WalkFunc) error
++
++ // Custom methods
++
++ // Checksum returns a checksum of the given file.
++ //
++ // What the checksum is is application-dependent; however, it
++ // should be the same across two invocations as long as the stored
++ // data is the same. It might also change over time even though the
++ // underlying data did not. Examples of checksums that can be
++ // used are: the file modification timestamp, a sha256sum of the
++ // file content, or the latest Git commit when the file was
++ // changed.
++ //
++ // os.IsNotExist(err) can be used to check if the file doesn't
++ // exist.
++ Checksum(ctx context.Context, filename string) (string, error)
++
++ // RootDirectory specifies where on disk the root directory is stored.
++ // This path MUST be absolute. All other paths for the other methods
++ // MUST be relative to this directory.
++ RootDirectory() string
++}
++
++// NewOSFilesystem creates a new afero.OsFs for the local directory, using
++// NewFilesystem underneath.
++func NewOSFilesystem(rootDir string) Filesystem {
++ return NewFilesystem(afero.NewOsFs(), rootDir)
++}
++
++// NewFilesystem wraps an underlying afero.Fs without context knowledge,
++// in a Filesystem-compliant implementation; scoped at the given directory
++// (i.e. wrapped in afero.NewBasePathFs(fs, rootDir)).
++//
++// Checksum is calculated based on the modification timestamp of the file.
++func NewFilesystem(fs afero.Fs, rootDir string) Filesystem {
++ // TODO: rootDir validation? It must be absolute, exist, and be a directory.
++ return &filesystem{afero.NewBasePathFs(fs, rootDir), rootDir}
++}
++
++type filesystem struct {
++ fs afero.Fs
++ rootDir string
++}
++
++func (f *filesystem) RootDirectory() string {
++ return f.rootDir
++}
++
++func (f *filesystem) Checksum(ctx context.Context, filename string) (string, error) {
++ fi, err := f.Stat(ctx, filename)
++ if err != nil {
++ return "", err
++ }
++ return checksumFromFileInfo(fi), nil
++}
++
++func (f *filesystem) MkdirAll(_ context.Context, path string, perm os.FileMode) error {
++ return f.fs.MkdirAll(path, perm)
++}
++
++func (f *filesystem) Remove(_ context.Context, name string) error {
++ return f.fs.Remove(name)
++}
++
++func (f *filesystem) Stat(_ context.Context, name string) (os.FileInfo, error) {
++ return f.fs.Stat(name)
++}
++
++func (f *filesystem) ReadDir(_ context.Context, dirname string) ([]os.FileInfo, error) {
++ return afero.ReadDir(f.fs, dirname)
++}
++
++func (f *filesystem) Exists(_ context.Context, path string) (bool, error) {
++ return afero.Exists(f.fs, path)
++}
++
++func (f *filesystem) ReadFile(_ context.Context, filename string) ([]byte, error) {
++ return afero.ReadFile(f.fs, filename)
++}
++
++func (f *filesystem) WriteFile(_ context.Context, filename string, data []byte, perm os.FileMode) error {
++ return afero.WriteFile(f.fs, filename, data, perm)
++}
++
++func (f *filesystem) Walk(_ context.Context, root string, walkFn filepath.WalkFunc) error {
++ return afero.Walk(f.fs, root, walkFn)
++}
++
++func checksumFromFileInfo(fi os.FileInfo) string {
++ return strconv.FormatInt(fi.ModTime().UnixNano(), 10)
++}
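++
++// A minimal usage sketch (afero.NewMemMapFs is handy for tests; all paths
++// are relative to the given root directory):
++//
++//  fs := NewFilesystem(afero.NewMemMapFs(), "/data")
++//  _ = fs.MkdirAll(ctx, "cars", 0755)
++//  _ = fs.WriteFile(ctx, "cars/car1.json", []byte(`{"kind":"Car"}`), 0644)
++//  sum, _ := fs.Checksum(ctx, "cars/car1.json") // the modification timestamp, as a string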
+diff --git a/pkg/storage/filesystem/format.go b/pkg/storage/filesystem/format.go
+new file mode 100644
+index 0000000..b36aa1c
+--- /dev/null
++++ b/pkg/storage/filesystem/format.go
+@@ -0,0 +1,92 @@
++package filesystem
++
++import (
++ "context"
++ "errors"
++ "fmt"
++ "path/filepath"
++
++ "github.com/weaveworks/libgitops/pkg/serializer"
++)
++
++var (
++ ErrCannotDetermineContentType = errors.New("cannot determine content type")
++ ErrUnrecognizedContentType = errors.New("unrecognized content type")
++)
++
++// ContentTyper resolves the Content Type of a file given its path and the afero
++// filesystem abstraction, so that the implementation can even examine the file
++// itself if needed to make the judgement. See DefaultContentTyper for a sample implementation.
++type ContentTyper interface {
++ // ContentTypeForPath should return the content type for the file that exists in
++ // the given Filesystem (path is relative). If the content type cannot be determined
++ // please return a wrapped ErrCannotDetermineContentType error.
++ ContentTypeForPath(ctx context.Context, fs Filesystem, path string) (serializer.ContentType, error)
++}
++
++// DefaultContentTyper describes the default mapping between
++// file extensions and content types.
++var DefaultContentTyper ContentTyper = ContentTypeForExtension{
++ ".json": serializer.ContentTypeJSON,
++ ".yaml": serializer.ContentTypeYAML,
++ ".yml": serializer.ContentTypeYAML,
++}
++
++// ContentTypeForExtension implements the ContentTyper interface
++// by looking up the extension of the given path (in ContentTypeForPath)
++// against the keys of the map. The extension in a map key
++// must start with a dot, e.g. ".json", and the corresponding value is
++// the content type for that extension. Many extensions may
++// map to the same content type, e.g. both ".yaml" -> ContentTypeYAML
++// and ".yml" -> ContentTypeYAML.
++type ContentTypeForExtension map[string]serializer.ContentType
++
++func (m ContentTypeForExtension) ContentTypeForPath(ctx context.Context, _ Filesystem, path string) (serializer.ContentType, error) {
++ ct, ok := m[filepath.Ext(path)]
++ if !ok {
++ return serializer.ContentType(""), fmt.Errorf("%w for file %q", ErrCannotDetermineContentType, path)
++ }
++ return ct, nil
++}
++
++// StaticContentTyper always responds with the same, statically-set, ContentType for any path.
++type StaticContentTyper struct {
++ // ContentType is a required field
++ ContentType serializer.ContentType
++}
++
++func (t StaticContentTyper) ContentTypeForPath(_ context.Context, _ Filesystem, _ string) (serializer.ContentType, error) {
++ if len(t.ContentType) == 0 {
++ return "", fmt.Errorf("StaticContentTyper.ContentType must not be empty")
++ }
++ return t.ContentType, nil
++}
++
++// FileExtensionResolver knows how to resolve what file extension to use for
++// a given ContentType.
++type FileExtensionResolver interface {
++ // ExtensionForContentType returns the file extension for the given ContentType.
++ // The returned string MUST start with a dot, e.g. ".json". If the given
++ // ContentType is not known, it is recommended to return a wrapped
++ // ErrUnrecognizedContentType.
++ ExtensionForContentType(ct serializer.ContentType) (string, error)
++}
++
++// DefaultFileExtensionResolver describes a default mapping between
++// file extensions and ContentTypes, namely JSON -> ".json" and
++// YAML -> ".yaml".
++var DefaultFileExtensionResolver FileExtensionResolver = ExtensionForContentType{
++ serializer.ContentTypeJSON: ".json",
++ serializer.ContentTypeYAML: ".yaml",
++}
++
++// ExtensionForContentType is a simple map implementation of FileExtensionResolver.
++type ExtensionForContentType map[serializer.ContentType]string
++
++func (m ExtensionForContentType) ExtensionForContentType(ct serializer.ContentType) (string, error) {
++ ext, ok := m[ct]
++ if !ok {
++ return "", fmt.Errorf("%q: %q", ErrUnrecognizedContentType, ct)
++ }
++ return ext, nil
++}
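++
++// A minimal usage sketch of the two default resolvers (fs is any Filesystem;
++// it is not examined by the extension-based ContentTyper):
++//
++//  ct, _ := DefaultContentTyper.ContentTypeForPath(ctx, fs, "cars/car1.yaml")
++//  // ct == serializer.ContentTypeYAML
++//  ext, _ := DefaultFileExtensionResolver.ExtensionForContentType(serializer.ContentTypeJSON)
++//  // ext == ".json"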
+diff --git a/pkg/storage/filesystem/interfaces.go b/pkg/storage/filesystem/interfaces.go
+new file mode 100644
+index 0000000..2626680
+--- /dev/null
++++ b/pkg/storage/filesystem/interfaces.go
+@@ -0,0 +1,49 @@
++package filesystem
++
++import (
++ "context"
++
++ "github.com/weaveworks/libgitops/pkg/storage"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++)
++
++// Storage (in this filesystem package) extends storage.Storage by specializing it to operate in a
++// filesystem context; in other words, it uses a FileFinder to locate the
++// files to operate on.
++type Storage interface {
++ storage.Storage
++
++ // FileFinder returns the underlying FileFinder used.
++ // TODO: Maybe one Storage can have multiple FileFinders?
++ FileFinder() FileFinder
++}
++
++// FileFinder is a generic implementation for locating files on disk, to be
++// used by a Storage.
++//
++// Important: The caller MUST guarantee that the implementation can figure
++// out if the GroupKind is namespaced or not by the following check:
++//
++// namespaced := id.ObjectKey().Namespace != ""
++//
++// In other words, the caller must enforce a namespace being set for namespaced
++// kinds, and namespace not being set for non-namespaced kinds.
++type FileFinder interface {
++ // Filesystem gets the underlying filesystem abstraction, if
++ // applicable.
++ Filesystem() Filesystem
++
++ // ContentTyper gets the underlying ContentTyper used. The ContentTyper
++ // must always return a result, even if the given path doesn't
++ // exist.
++ ContentTyper() ContentTyper
++
++ // ObjectPath gets the file path relative to the root directory.
++ // In order to support a create operation, this function must also return a valid path for
++ // files that do not yet exist on disk.
++ ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error)
++ // ObjectAt retrieves the ID based on the given relative file path to fs.
++ ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error)
++ // The FileFinder should be able to list namespaces and Object IDs
++ storage.Lister
++}
+diff --git a/pkg/storage/filesystem/path_excluder.go b/pkg/storage/filesystem/path_excluder.go
+new file mode 100644
+index 0000000..58e8d2a
+--- /dev/null
++++ b/pkg/storage/filesystem/path_excluder.go
+@@ -0,0 +1,92 @@
++package filesystem
++
++import (
++ "os"
++ "path/filepath"
++ "strings"
++
++ "k8s.io/apimachinery/pkg/util/sets"
++)
++
++// PathExcluder is an interface that lets the user implement custom policies
++// for whether a given relative path to a given directory (fs is scoped at
++// that directory) should be considered for an operation (e.g. inotify watch
++// or file search).
++type PathExcluder interface {
++ // ShouldExcludePath takes in the relative path of a file that may
++ // need to be excluded.
++ ShouldExcludePath(path string) bool
++}
++
++// DefaultPathExcluders returns a composition of
++// ExcludeDirectoryNames{} for ".git" dirs and ExcludeExtensions{} for the ".swp" file extension.
++func DefaultPathExcluders() PathExcluder {
++ return MultiPathExcluder{
++ PathExcluders: []PathExcluder{
++ ExcludeDirectoryNames{
++ DirectoryNamesToExclude: []string{".git"},
++ },
++ ExcludeExtensions{
++ Extensions: []string{".swp"}, // nano creates temporary .swp
++ },
++ },
++ }
++}
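++
++// A hedged usage sketch: a custom composition equivalent to the default,
++// except that it also excludes "vendor" directories (an assumed policy):
++//
++//   excluder := MultiPathExcluder{PathExcluders: []PathExcluder{
++//       ExcludeDirectoryNames{DirectoryNamesToExclude: []string{".git", "vendor"}},
++//       ExcludeExtensions{Extensions: []string{".swp"}},
++//   }}
++//   _ = excluder.ShouldExcludePath("vendor/foo/bar.yaml") // true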
++
++// ExcludeDirectoryNames implements PathExcluder.
++var _ PathExcluder = ExcludeDirectoryNames{}
++
++// ExcludeDirectoryNames is a sample implementation of PathExcluder, that excludes
++// files that have any parent directory with one of the given names.
++type ExcludeDirectoryNames struct {
++ DirectoryNamesToExclude []string
++}
++
++func (e ExcludeDirectoryNames) ShouldExcludePath(path string) bool {
++ parts := strings.Split(filepath.Clean(path), string(os.PathSeparator))
++ return sets.NewString(parts[:len(parts)-1]...).HasAny(e.DirectoryNamesToExclude...)
++}
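++
++// Note that only parent directory components are matched; a file itself
++// named ".git" is not excluded. For example:
++//
++//   e := ExcludeDirectoryNames{DirectoryNamesToExclude: []string{".git"}}
++//   e.ShouldExcludePath(".git/config") // true
++//   e.ShouldExcludePath("hello/.git")  // false (".git" is the file here)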
++
++// ExcludeExtensions implements PathExcluder.
++var _ PathExcluder = ExcludeExtensions{}
++
++// ExcludeExtensions is a sample implementation of PathExcluder, that excludes
++// all files with the given extensions. The strings in the Extensions slice
++// must be in the form of filepath.Ext, i.e. ".json", ".txt", and so forth.
++// The zero value of ExcludeExtensions excludes no files.
++type ExcludeExtensions struct {
++ Extensions []string
++}
++
++func (e ExcludeExtensions) ShouldExcludePath(path string) bool {
++ ext := filepath.Ext(path)
++ for _, exclExt := range e.Extensions {
++ if ext == exclExt {
++ return true
++ }
++ }
++ return false
++}
++
++// MultiPathExcluder implements PathExcluder.
++var _ PathExcluder = &MultiPathExcluder{}
++
++// MultiPathExcluder is a composite PathExcluder that runs all of the
++// PathExcluders in the slice one-by-one, and returns true if any of them
++// returns true. The zero value of MultiPathExcluder excludes no files.
++type MultiPathExcluder struct {
++ PathExcluders []PathExcluder
++}
++
++func (m MultiPathExcluder) ShouldExcludePath(path string) bool {
++ // Loop through all the excluders, and return true if any of them excludes the path
++ for _, excl := range m.PathExcluders {
++ if excl == nil {
++ continue
++ }
++ if excl.ShouldExcludePath(path) {
++ return true
++ }
++ }
++ return false
++}
+diff --git a/pkg/storage/filesystem/path_excluder_test.go b/pkg/storage/filesystem/path_excluder_test.go
+new file mode 100644
+index 0000000..5995fd2
+--- /dev/null
++++ b/pkg/storage/filesystem/path_excluder_test.go
+@@ -0,0 +1,77 @@
++package filesystem
++
++import (
++ "testing"
++)
++
++func TestExcludeGitDirectory_ShouldExcludePath(t *testing.T) {
++ tests := []struct {
++ name string
++ path string
++ want bool
++ }{
++ {
++ name: "normal",
++ path: ".git/foo",
++ want: true,
++ },
++ {
++ name: "with relative path",
++ path: "./.git/bar/baz",
++ want: true,
++ },
++ {
++ name: "with many parents",
++ path: "/foo/bar/.git/hello",
++ want: true,
++ },
++ {
++ name: "with many children",
++ path: ".git/foo/bar/baz",
++ want: true,
++ },
++ {
++ name: "with parents and children",
++ path: "./foo/bar/.git/baz/bar",
++ want: true,
++ },
++ {
++ name: "empty",
++ path: "",
++ want: false,
++ },
++ {
++ name: "local dir",
++ path: ".",
++ want: false,
++ },
++ {
++ name: "other prefix",
++ path: "foo.git",
++ want: false,
++ },
++ {
++ name: "other suffix",
++ path: ".gitea",
++ want: false,
++ },
++ {
++ name: "absolute path without git",
++ path: "/foo/bar/no/git/here",
++ want: false,
++ },
++ {
++ name: "don't catch files named .git",
++ path: "/hello/.git",
++ want: false,
++ },
++ }
++ e := ExcludeDirectoryNames{DirectoryNamesToExclude: []string{".git"}}
++ for _, tt := range tests {
++ t.Run(tt.name, func(t *testing.T) {
++ if got := e.ShouldExcludePath(tt.path); got != tt.want {
++ t.Errorf("ExcludeGitDirectory.ShouldExcludePath() = %v, want %v", got, tt.want)
++ }
++ })
++ }
++}
+diff --git a/pkg/storage/filesystem/storage.go b/pkg/storage/filesystem/storage.go
+new file mode 100644
+index 0000000..f3bc287
+--- /dev/null
++++ b/pkg/storage/filesystem/storage.go
+@@ -0,0 +1,170 @@
++package filesystem
++
++import (
++ "context"
++ "fmt"
++ "os"
++ "path/filepath"
++
++ "github.com/weaveworks/libgitops/pkg/serializer"
++ "github.com/weaveworks/libgitops/pkg/storage"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "k8s.io/apimachinery/pkg/util/sets"
++)
++
++// NewGeneric creates a new Generic using the given lower-level
++// FileFinder and Namespacer.
++func NewGeneric(fileFinder FileFinder, namespacer core.Namespacer) (Storage, error) {
++ if fileFinder == nil {
++ return nil, fmt.Errorf("NewGeneric: fileFinder is mandatory")
++ }
++ if namespacer == nil {
++ return nil, fmt.Errorf("NewGeneric: namespacer is mandatory")
++ }
++
++ return &Generic{
++ fileFinder: fileFinder,
++ namespacer: namespacer,
++ }, nil
++}
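++
++// A hedged construction sketch, mirroring how unstructuredevent.NewManifest
++// wires things up elsewhere in this change (dir and namespacer are the
++// caller's values):
++//
++//   fs := NewOSFilesystem(dir)
++//   fileFinder := unstructured.NewGenericMappedFileFinder(nil, fs)
++//   s, err := NewGeneric(fileFinder, namespacer)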
++
++// Generic is a Storage-compliant implementation, that
++// combines the given lower-level FileFinder, Namespacer and Filesystem interfaces
++// in a generic manner.
++type Generic struct {
++ fileFinder FileFinder
++ namespacer core.Namespacer
++}
++
++func (r *Generic) Namespacer() core.Namespacer {
++ return r.namespacer
++}
++
++func (r *Generic) FileFinder() FileFinder {
++ return r.fileFinder
++}
++
++func (r *Generic) Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) {
++ // Get the path and verify namespacing info
++ p, err := r.getPath(ctx, id)
++ if err != nil {
++ return nil, err
++ }
++ // Check if the resource indicated by key exists
++ if !r.exists(ctx, p) {
++ return nil, core.NewErrNotFound(id)
++ }
++ // Read the file
++ return r.FileFinder().Filesystem().ReadFile(ctx, p)
++}
++
++func (r *Generic) Exists(ctx context.Context, id core.UnversionedObjectID) bool {
++ // Get the path and verify namespacing info
++ p, err := r.getPath(ctx, id)
++ if err != nil {
++ return false
++ }
++ return r.exists(ctx, p)
++}
++
++func (r *Generic) exists(ctx context.Context, path string) bool {
++ exists, _ := r.FileFinder().Filesystem().Exists(ctx, path)
++ return exists
++}
++
++func (r *Generic) Checksum(ctx context.Context, id core.UnversionedObjectID) (string, error) {
++ // Get the path and verify namespacing info
++ p, err := r.getPath(ctx, id)
++ if err != nil {
++ return "", err
++ }
++ // Return a "high level" error if the file does not exist
++ checksum, err := r.FileFinder().Filesystem().Checksum(ctx, p)
++ if os.IsNotExist(err) {
++ return "", core.NewErrNotFound(id)
++ } else if err != nil {
++ return "", err
++ }
++ return checksum, nil
++}
++
++func (r *Generic) ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) {
++ // Get the path and verify namespacing info
++ p, err := r.getPath(ctx, id)
++ if err != nil {
++ return "", err
++ }
++ return r.FileFinder().ContentTyper().ContentTypeForPath(ctx, r.fileFinder.Filesystem(), p)
++}
++
++func (r *Generic) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error {
++ // Get the path and verify namespacing info
++ p, err := r.getPath(ctx, id)
++ if err != nil {
++ return err
++ }
++
++ // Create the underlying directories if they do not exist already
++ if !r.exists(ctx, p) {
++ if err := r.FileFinder().Filesystem().MkdirAll(ctx, filepath.Dir(p), 0755); err != nil {
++ return err
++ }
++ }
++ // Write the file content
++ return r.FileFinder().Filesystem().WriteFile(ctx, p, content, 0664)
++}
++
++func (r *Generic) Delete(ctx context.Context, id core.UnversionedObjectID) error {
++ // Get the path and verify namespacing info
++ p, err := r.getPath(ctx, id)
++ if err != nil {
++ return err
++ }
++
++ // Check if the resource indicated by key exists
++ if !r.exists(ctx, p) {
++ return core.NewErrNotFound(id)
++ }
++ // Remove the file
++ return r.FileFinder().Filesystem().Remove(ctx, p)
++}
++
++// ListNamespaces lists the available namespaces for the given GroupKind.
++// This function shall only be called for namespaced objects, it is up to
++// the caller to make sure they do not call this method for root-spaced
++// objects; for that the behavior is undefined (but returning an error
++// is recommended).
++func (r *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) {
++ namespaced, err := r.namespacer.IsNamespaced(gk)
++ if err != nil {
++ return nil, err
++ }
++ // Validate the groupkind
++ if !namespaced {
++ return nil, fmt.Errorf("%w: cannot list namespaces for non-namespaced kind: %v", storage.ErrNamespacedMismatch, gk)
++ }
++ // Just use the underlying filefinder
++ return r.FileFinder().ListNamespaces(ctx, gk)
++}
++
++// ListObjectIDs returns a list of unversioned ObjectIDs.
++// For namespaced GroupKinds, the caller must provide a namespace, and for
++// root-spaced GroupKinds, the caller must not. When namespaced, this function
++// must only return object IDs for that given namespace.
++func (r *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) {
++ // Validate the namespace parameter
++ if err := storage.VerifyNamespaced(r.Namespacer(), gk, namespace); err != nil {
++ return nil, err
++ }
++ // Just use the underlying filefinder
++ return r.FileFinder().ListObjectIDs(ctx, gk, namespace)
++}
++
++func (r *Generic) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) {
++ // Verify namespacing info
++ if err := storage.VerifyNamespaced(r.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace); err != nil {
++ return "", err
++ }
++ // Get the path
++ return r.FileFinder().ObjectPath(ctx, id)
++}
+diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go
+new file mode 100644
+index 0000000..0d674b5
+--- /dev/null
++++ b/pkg/storage/filesystem/unstructured/event/storage.go
+@@ -0,0 +1,352 @@
++package unstructuredevent
++
++import (
++ "context"
++ "fmt"
++ gosync "sync"
++
++ "github.com/sirupsen/logrus"
++ "github.com/weaveworks/libgitops/pkg/storage"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "github.com/weaveworks/libgitops/pkg/storage/event"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents/inotify"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured"
++ "github.com/weaveworks/libgitops/pkg/util/sync"
++)
++
++// UnstructuredEventStorage is an extension of unstructured.Storage, that
++// adds the possibility to listen for object updates from a FileEventsEmitter.
++//
++// When the Sync() function is run, the ObjectEvents that are emitted to the
++// listening channels will have ObjectEvent.Type == ObjectEventSync.
++type UnstructuredEventStorage interface {
++ unstructured.Storage
++ fileevents.StorageCommon
++}
++
++const defaultEventsBufferSize = 4096
++
++// NewManifest is a high-level constructor for a generic
++// MappedFileFinder and filesystem.Storage, together with an
++// inotify FileWatcher; all combined into an UnstructuredEventStorage.
++func NewManifest(
++ dir string,
++ contentTyper filesystem.ContentTyper,
++ namespacer core.Namespacer,
++ recognizer core.ObjectRecognizer,
++ pathExcluder filesystem.PathExcluder,
++) (UnstructuredEventStorage, error) {
++ fs := filesystem.NewOSFilesystem(dir)
++ fileFinder := unstructured.NewGenericMappedFileFinder(contentTyper, fs)
++ fsRaw, err := filesystem.NewGeneric(fileFinder, namespacer)
++ if err != nil {
++ return nil, err
++ }
++ emitter, err := inotify.NewFileWatcher(dir, &inotify.FileWatcherOptions{
++ PathExcluder: pathExcluder,
++ })
++ if err != nil {
++ return nil, err
++ }
++ unstructuredRaw, err := unstructured.NewGeneric(fsRaw, recognizer, pathExcluder)
++ if err != nil {
++ return nil, err
++ }
++ return NewGeneric(unstructuredRaw, emitter, GenericStorageOptions{
++ SyncAtStart: true,
++ EmitSyncEvent: true,
++ })
++}
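++
++// A hedged usage sketch; the namespacer and recognizer values are whatever
++// the caller already has, the nil contentTyper falls back to the default,
++// and the channel buffer size is an arbitrary choice:
++//
++//   s, err := NewManifest(dir, nil, namespacer, recognizer, filesystem.DefaultPathExcluders())
++//   events := make(event.ObjectEventStream, 64)
++//   if err := s.WatchForObjectEvents(ctx, events); err != nil { /* handle */ }
++//   for ev := range events {
++//       // ev.ID and ev.Type describe what changed
++//   }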
++
++// NewGeneric returns an extended Storage implementation, which together
++// with the provided unstructured.Storage and fileevents.Emitter listens for
++// file events, keeps the mappings of the Storage's MappedFileFinder
++// in sync, and sends high-level ObjectEvents upstream.
++//
++// Note: This Storage only works for one-frame files (i.e. only one YAML document
++// per file is supported).
++func NewGeneric(
++ s unstructured.Storage,
++ emitter fileevents.Emitter,
++ opts GenericStorageOptions,
++) (UnstructuredEventStorage, error) {
++ return &Generic{
++ Storage: s,
++ emitter: emitter,
++
++ inbound: make(fileevents.FileEventStream, defaultEventsBufferSize),
++ // outbound set by WatchForObjectEvents
++ outboundMu: &gosync.Mutex{},
++
++ // monitor set by WatchForObjectEvents, guarded by outboundMu
++
++ opts: opts,
++ }, nil
++}
++
++type GenericStorageOptions struct {
++ // When Sync(ctx) is run, emit a "SYNC" event to the listening channel
++ // Default: false
++ EmitSyncEvent bool
++ // Do a full re-sync at startup of the watcher
++ // Default: true
++ SyncAtStart bool
++}
++
++// Generic implements UnstructuredEventStorage.
++var _ UnstructuredEventStorage = &Generic{}
++
++// Generic is an extended unstructured.Storage implementation, which watches
++// for changes in the directory managed by the embedded Storage's FileFinder.
++// The mappings of the MappedFileFinder are automatically kept up-to-date,
++// and update events are sent to the event stream given to WatchForObjectEvents.
++// Note: This implementation only works for one-frame files (i.e. only one
++// YAML document per file is supported).
++type Generic struct {
++ unstructured.Storage
++ // the filesystem events emitter
++ emitter fileevents.Emitter
++
++ // channels
++ inbound fileevents.FileEventStream
++ outbound event.ObjectEventStream
++ outboundMu *gosync.Mutex
++
++ // goroutine
++ monitor *sync.Monitor
++
++ // opts
++ opts GenericStorageOptions
++}
++
++func (s *Generic) FileEventsEmitter() fileevents.Emitter {
++ return s.emitter
++}
++
++func (s *Generic) WatchForObjectEvents(ctx context.Context, into event.ObjectEventStream) error {
++ s.outboundMu.Lock()
++ defer s.outboundMu.Unlock()
++ // We don't support more than one listener
++ // TODO: maybe support many listeners in the future?
++ if s.outbound != nil {
++ return fmt.Errorf("WatchStorage: not more than one watch supported: %w", fileevents.ErrTooManyWatches)
++ }
++ // Hook up our inbound channel to the emitter, to make the pipeline functional
++ if err := s.emitter.WatchForFileEvents(ctx, s.inbound); err != nil {
++ return err
++ }
++ // Set outbound at this stage so Sync possibly can send events.
++ s.outbound = into
++ // Start the backing goroutines
++ s.monitor = sync.RunMonitor(s.monitorFunc)
++
++ // Do a full sync at startup only if asked to. Be aware that if no Sync is run
++ // before events start happening, the event reporting might not work as intended
++ if s.opts.SyncAtStart {
++ // Disregard the list of changed files returned by Sync.
++ if _, err := s.Sync(ctx); err != nil {
++ return err
++ }
++ }
++ return nil // all ok
++}
++
++func (s *Generic) Sync(ctx context.Context) ([]unstructured.ChecksumPathID, error) {
++ // Sync the underlying UnstructuredStorage, and see what files had changed since last sync
++ changedObjects, err := s.Storage.Sync(ctx)
++ if err != nil {
++ return nil, err
++ }
++
++ // Send special "sync" events for each of the changed objects, if configured
++ if s.opts.EmitSyncEvent {
++ for _, changedObject := range changedObjects {
++ // Send a special "sync" event for this ObjectID to the events channel
++ s.sendEvent(event.ObjectEventSync, changedObject.ID)
++ }
++ }
++
++ return changedObjects, nil
++}
++
++// Write writes the given content to the resource indicated by the ID.
++// Error returns are implementation-specific.
++func (s *Generic) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error {
++ // Get the path and verify namespacing info
++ p, err := s.getPath(ctx, id)
++ if err != nil {
++ return err
++ }
++ // Suspend the write event
++ s.emitter.Suspend(ctx, p)
++ // Call the underlying filesystem.Storage
++ return s.Storage.Write(ctx, id, content)
++}
++
++// Delete deletes the resource indicated by the ID.
++// If the resource does not exist, it returns ErrNotFound.
++func (s *Generic) Delete(ctx context.Context, id core.UnversionedObjectID) error {
++ // Get the path and verify namespacing info
++ p, err := s.getPath(ctx, id)
++ if err != nil {
++ return err
++ }
++ // Suspend the write event
++ s.emitter.Suspend(ctx, p)
++ // Call the underlying filesystem.Storage
++ return s.Storage.Delete(ctx, id)
++}
++
++func (s *Generic) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) {
++ // Verify namespacing info
++ if err := storage.VerifyNamespaced(s.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace); err != nil {
++ return "", err
++ }
++ // Get the path
++ return s.FileFinder().ObjectPath(ctx, id)
++}
++
++func (s *Generic) Close() error {
++ err := s.emitter.Close()
++ // No need to check the error here
++ _ = s.monitor.Wait()
++ return err
++}
++
++func (s *Generic) monitorFunc() error {
++ logrus.Debug("WatchStorage: Monitoring thread started")
++ defer logrus.Debug("WatchStorage: Monitoring thread stopped")
++
++ ctx := context.Background()
++
++ for {
++ // TODO: handle context cancellations, i.e. ctx.Done()
++ ev, ok := <-s.inbound
++ if !ok {
++ logrus.Error("WatchStorage: Fatal: Got non-ok response from watcher.GetFileEventStream()")
++ return nil
++ }
++
++ logrus.Tracef("WatchStorage: Processing event: %s", ev.Type)
++
++ // Skip the file if it has an invalid path
++ if !filesystem.IsValidFileInFilesystem(
++ ctx,
++ s.FileFinder().Filesystem(),
++ s.FileFinder().ContentTyper(),
++ s.PathExcluder(),
++ ev.Path) {
++ logrus.Tracef("WatchStorage: Skipping file %q as it is ignored by the ContentTyper/PathExcluder", ev.Path)
++ continue
++ }
++
++ var err error
++ switch ev.Type {
++ // FileEventModify is also sent for newly-created files
++ case fileevents.FileEventModify, fileevents.FileEventMove:
++ err = s.handleModifyMove(ctx, ev)
++ case fileevents.FileEventDelete:
++ err = s.handleDelete(ctx, ev)
++ default:
++ err = fmt.Errorf("cannot handle update of type %v for path %q", ev.Type, ev.Path)
++ }
++ if err != nil {
++ logrus.Errorf("WatchStorage: %v", err)
++ }
++ }
++}
++
++func (s *Generic) handleDelete(ctx context.Context, ev *fileevents.FileEvent) error {
++ // The object is deleted, so we need to do a reverse-lookup of what kind of object
++ // was there earlier, based on the path. This assumes that the FileFinder organizes
++ // the known objects in such a way that it is able to do the reverse-lookup. For
++ // mapped FileFinders, by this point the path should still be in the local cache,
++ // which lets us get the ID before it is deleted from the cache.
++ objectID, err := s.MappedFileFinder().ObjectAt(ctx, ev.Path)
++ if err != nil {
++ return fmt.Errorf("failed to reverse lookup ID for deleted file %q: %v", ev.Path, err)
++ }
++
++ // Remove the mapping from the FileFinder cache for this ID as it's now deleted
++ s.deleteMapping(ctx, objectID)
++ // Send the delete event to the channel
++ s.sendEvent(event.ObjectEventDelete, objectID)
++ return nil
++}
++
++func (s *Generic) handleModifyMove(ctx context.Context, ev *fileevents.FileEvent) error {
++ // Read the content of this modified, moved or created file
++ content, err := s.FileFinder().Filesystem().ReadFile(ctx, ev.Path)
++ if err != nil {
++ return fmt.Errorf("could not read %q: %v", ev.Path, err)
++ }
++
++ // Try to recognize the object
++ versionedID, err := s.ObjectRecognizer().ResolveObjectID(ctx, ev.Path, content)
++ if err != nil {
++ return fmt.Errorf("did not recognize object at path %q: %v", ev.Path, err)
++ }
++
++ // If the file was just moved around, simply overwrite the earlier mapping
++ if ev.Type == fileevents.FileEventMove {
++ // This assumes that the file content does not change in the move
++ // operation. TODO: document this as a requirement for the Emitter.
++ s.setMapping(ctx, versionedID, ev.Path)
++
++ // Internal move events are a no-op
++ return nil
++ }
++
++ // Determine if this object already existed in the fileFinder's cache,
++ // in order to find out if the object was created or modified (default).
++ // TODO: In the future, maybe support multiple files pointing to the same
++ // ObjectID? Case in point here is e.g. a Modify event for a known path that
++ // changes the underlying ObjectID.
++ objectEvent := event.ObjectEventUpdate
++ // Set the mapping if it didn't exist before; assume this is a Create event
++ if _, ok := s.MappedFileFinder().GetMapping(ctx, versionedID); !ok {
++ // This is what actually determines if an Object is created,
++ // so update the event to event.ObjectEventCreate here
++ objectEvent = event.ObjectEventCreate
++ }
++ // Update the mapping between this object and path (this updates
++ // the checksum underneath too).
++ s.setMapping(ctx, versionedID, ev.Path)
++ // Send the event to the channel
++ s.sendEvent(objectEvent, versionedID)
++ return nil
++}
++
++func (s *Generic) sendEvent(eventType event.ObjectEventType, id core.UnversionedObjectID) {
++ logrus.Tracef("Generic: Sending event: %v", eventType)
++ s.outbound <- &event.ObjectEvent{
++ ID: id,
++ Type: eventType,
++ }
++}
++
++// setMapping registers a mapping between the given object and the specified path
++// in the MappedFileFinder's cache. If a mapping already exists between this
++// object and some path, it will be overwritten with the specified new path.
++func (s *Generic) setMapping(ctx context.Context, id core.UnversionedObjectID, path string) {
++ // Get the current checksum of the new file
++ checksum, err := s.MappedFileFinder().Filesystem().Checksum(ctx, path)
++ if err != nil {
++ logrus.Errorf("Unexpected error when getting checksum of file %q: %v", path, err)
++ return
++ }
++ // Register the current state in the cache
++ s.MappedFileFinder().SetMapping(ctx, id, unstructured.ChecksumPath{
++ Path: path,
++ Checksum: checksum,
++ })
++}
++
++// deleteMapping removes the mapping for a file that no longer exists
++func (s *Generic) deleteMapping(ctx context.Context, id core.UnversionedObjectID) {
++ s.MappedFileFinder().DeleteMapping(ctx, id)
++}
+diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go
+new file mode 100644
+index 0000000..274da22
+--- /dev/null
++++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go
+@@ -0,0 +1,157 @@
++package unstructured
++
++import (
++ "context"
++ "errors"
++
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
++ utilerrs "k8s.io/apimachinery/pkg/util/errors"
++ "k8s.io/apimachinery/pkg/util/sets"
++)
++
++var (
++ // ErrNotTracked is returned when the requested object is not tracked in the internal cache.
++ ErrNotTracked = errors.New("untracked object")
++)
++
++// GenericMappedFileFinder implements MappedFileFinder.
++var _ MappedFileFinder = &GenericMappedFileFinder{}
++
++// NewGenericMappedFileFinder creates a new instance of GenericMappedFileFinder,
++// that implements the MappedFileFinder interface. The contentTyper is optional;
++// by default filesystem.DefaultContentTyper will be used.
++func NewGenericMappedFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.Filesystem) MappedFileFinder {
++ if contentTyper == nil {
++ contentTyper = filesystem.DefaultContentTyper
++ }
++ if fs == nil {
++ panic("NewGenericMappedFileFinder: fs is mandatory")
++ }
++ return &GenericMappedFileFinder{
++ contentTyper: contentTyper,
++ // TODO: Support multiple branches
++ branch: &branchImpl{},
++ fs: fs,
++ }
++}
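++
++// A hedged usage sketch (the ctx, id and path values are placeholders):
++//
++//   ff := NewGenericMappedFileFinder(nil, fs)
++//   ff.SetMapping(ctx, id, ChecksumPath{Path: "cars/mycar.yaml", Checksum: "abc123"})
++//   p, _ := ff.ObjectPath(ctx, id)                 // "cars/mycar.yaml"
++//   id2, _ := ff.ObjectAt(ctx, "cars/mycar.yaml")  // the same ID, unversioned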
++
++// GenericMappedFileFinder is a generic implementation of MappedFileFinder.
++// It uses a ContentTyper to identify what content type a file uses.
++//
++// This implementation relies on all information about what files exist
++// being fed through SetMapping(s). If a file or ID is requested that doesn't
++// exist in the internal cache, ErrNotTracked will be returned.
++//
++// Hence, this implementation does not at the moment support creating net-new
++// Objects without someone calling SetMapping() first.
++type GenericMappedFileFinder struct {
++ // Default: DefaultContentTyper
++ contentTyper filesystem.ContentTyper
++ fs filesystem.Filesystem
++
++ branch branch
++}
++
++func (f *GenericMappedFileFinder) Filesystem() filesystem.Filesystem {
++ return f.fs
++}
++
++func (f *GenericMappedFileFinder) ContentTyper() filesystem.ContentTyper {
++ return f.contentTyper
++}
++
++// ObjectPath gets the file path relative to the root directory
++func (f *GenericMappedFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) {
++ cp, ok := f.GetMapping(ctx, id)
++ if !ok {
++ // TODO: separate interface for "new creates"?
++ return "", utilerrs.NewAggregate([]error{ErrNotTracked, core.NewErrNotFound(id)})
++ }
++ return cp.Path, nil
++}
++
++// ObjectAt retrieves the ID of the object stored at the given physical
++// file path, by doing a reverse lookup in the cache.
++func (f *GenericMappedFileFinder) ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) {
++ // TODO: Add reverse tracking too?
++ for gk, gkIter := range f.branch.raw() {
++ for ns, nsIter := range gkIter.raw() {
++ for name, cp := range nsIter.raw() {
++ if cp.Path == path {
++ return core.NewUnversionedObjectID(gk, core.ObjectKey{Name: name, Namespace: ns}), nil
++ }
++ }
++ }
++ }
++ // TODO: Support "creation" of Objects easier, in a generic way through an interface, e.g.
++ // NewObjectPlacer?
++ return nil, ErrNotTracked
++}
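++
++// Note: the reverse lookup above is a linear scan over all cached mappings;
++// a dedicated path -> ID index (see the TODO above) would make ObjectAt
++// cheaper at the cost of maintaining a second map.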
++
++// ListNamespaces lists the available namespaces for the given GroupKind.
++// This function shall only be called for namespaced objects, it is up to
++// the caller to make sure they do not call this method for root-spaced
++// objects. If any of the given rules are violated, ErrNamespacedMismatch
++// should be returned as a wrapped error.
++//
++// The implementer can choose between basing the answer strictly on e.g.
++// v1.Namespace objects that exist in the system, or just the set of
++// different namespaces that have been set on any object belonging to
++// the given GroupKind.
++func (f *GenericMappedFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) {
++ m := f.branch.groupKind(gk).raw()
++ nsSet := sets.NewString()
++ for ns := range m {
++ nsSet.Insert(ns)
++ }
++ return nsSet, nil
++}
++
++// ListObjectIDs returns a list of unversioned ObjectIDs.
++// For namespaced GroupKinds, the caller must provide a namespace, and for
++// root-spaced GroupKinds, the caller must not. When namespaced, this function
++// must only return object IDs for that given namespace. If any of the given
++// rules are violated, ErrNamespacedMismatch should be returned as a wrapped error.
++func (f *GenericMappedFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) {
++ m := f.branch.groupKind(gk).namespace(namespace).raw()
++ ids := make([]core.UnversionedObjectID, 0, len(m))
++ for name := range m {
++ ids = append(ids, core.NewUnversionedObjectID(gk, core.ObjectKey{Name: name, Namespace: namespace}))
++ }
++ return ids, nil
++}
++
++// GetMapping retrieves a mapping in the system
++func (f *GenericMappedFileFinder) GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) {
++ cp, ok := f.branch.
++ groupKind(id.GroupKind()).
++ namespace(id.ObjectKey().Namespace).
++ name(id.ObjectKey().Name)
++ return cp, ok
++}
++
++// SetMapping binds an ID's virtual path to a physical file path
++func (f *GenericMappedFileFinder) SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) {
++ f.branch.
++ groupKind(id.GroupKind()).
++ namespace(id.ObjectKey().Namespace).
++ setName(id.ObjectKey().Name, checksumPath)
++}
++
++// ResetMappings replaces all mappings at once
++func (f *GenericMappedFileFinder) ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) {
++ f.branch = &branchImpl{}
++ for id, cp := range m {
++ f.SetMapping(ctx, id, cp)
++ }
++}
++
++// DeleteMapping removes the physical file path mapping
++// matching the given id
++func (f *GenericMappedFileFinder) DeleteMapping(ctx context.Context, id core.UnversionedObjectID) {
++ f.branch.
++ groupKind(id.GroupKind()).
++ namespace(id.ObjectKey().Namespace).
++ deleteName(id.ObjectKey().Name)
++}
+diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go
+new file mode 100644
+index 0000000..814b437
+--- /dev/null
++++ b/pkg/storage/filesystem/unstructured/interfaces.go
+@@ -0,0 +1,75 @@
++package unstructured
++
++import (
++ "context"
++
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
++)
++
++// Storage is a raw Storage interface that builds on top of
++// filesystem.Storage. It uses an ObjectRecognizer to recognize
++// otherwise unknown objects in unstructured files.
++// The underlying filesystem.Storage must use a MappedFileFinder.
++//
++// Multiple Objects in the same file, or multiple Objects with the
++// same ID in multiple files are not supported.
++type Storage interface {
++ filesystem.Storage
++
++ // Sync synchronizes the current state of the filesystem with the
++ // cached mappings in the MappedFileFinder.
++ Sync(ctx context.Context) ([]ChecksumPathID, error)
++
++ // ObjectRecognizer returns the underlying ObjectRecognizer used.
++ ObjectRecognizer() core.ObjectRecognizer
++ // PathExcluder specifies what paths to not sync
++ PathExcluder() filesystem.PathExcluder
++ // MappedFileFinder returns the underlying MappedFileFinder used.
++ MappedFileFinder() MappedFileFinder
++}
++
++// MappedFileFinder is an extension to FileFinder that allows it to have an internal
++// cache with mappings between UnversionedObjectID and a ChecksumPath. This allows
++// higher-order interfaces to manage Objects in files in an unorganized directory
++// (e.g. a Git repo).
++//
++// Multiple Objects in the same file, or multiple Objects with the
++// same ID in multiple files are not supported.
++type MappedFileFinder interface {
++ filesystem.FileFinder
++
++ // GetMapping retrieves a mapping in the system.
++ GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool)
++ // SetMapping binds an ID to a physical file path. This operation overwrites
++ // any previous mapping for id.
++ SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath)
++ // ResetMappings replaces all mappings at once to the ones in m.
++ ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath)
++ // DeleteMapping removes the mapping for the given id.
++ DeleteMapping(ctx context.Context, id core.UnversionedObjectID)
++}
++
++// ChecksumPath is a tuple of a given Checksum and relative file Path,
++// for use in MappedFileFinder.
++type ChecksumPath struct {
++ // Checksum is the checksum of the file at the given path.
++ //
++ // What the checksum represents is application-dependent; however, it
++ // should be the same for two invocations, as long as the stored
++ // data is the same. It might change over time although the
++ // underlying data did not. Examples of checksums that can be
++ // used are: the file modification timestamp, a sha256sum of the
++ // file content, or the latest Git commit when the file was
++ // changed.
++ //
++ // The checksum is calculated by the filesystem.Filesystem.
++ Checksum string
++ // Path to the file, relative to filesystem.Filesystem.RootDirectory().
++ Path string
++}
++
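++// ChecksumPathID couples a ChecksumPath with the ObjectID of the object
++// stored at that path; this is what Storage.Sync() returns for each
++// changed file.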
++type ChecksumPathID struct {
++ ChecksumPath
++ ID core.ObjectID
++}
+diff --git a/pkg/storage/filesystem/unstructured/mapped_cache.go b/pkg/storage/filesystem/unstructured/mapped_cache.go
+new file mode 100644
+index 0000000..08aeb83
+--- /dev/null
++++ b/pkg/storage/filesystem/unstructured/mapped_cache.go
+@@ -0,0 +1,104 @@
++package unstructured
++
++import "github.com/weaveworks/libgitops/pkg/storage/core"
++
++// This file contains a set of private interfaces and implementations
++// that allows caching mappings between a core.UnversionedObjectID
++// and a ChecksumPath.
++
++// TODO: rename this interface
++type branch interface {
++ groupKind(core.GroupKind) groupKind
++ raw() map[core.GroupKind]groupKind
++}
++
++type groupKind interface {
++ namespace(string) namespace
++ raw() map[string]namespace
++}
++
++type namespace interface {
++ name(string) (ChecksumPath, bool)
++ setName(string, ChecksumPath)
++ deleteName(string)
++ raw() map[string]ChecksumPath
++}
++
++type branchImpl struct {
++ m map[core.GroupKind]groupKind
++}
++
++func (b *branchImpl) groupKind(gk core.GroupKind) groupKind {
++ if b.m == nil {
++ b.m = make(map[core.GroupKind]groupKind)
++ }
++ val, ok := b.m[gk]
++ if !ok {
++ val = &groupKindImpl{}
++ b.m[gk] = val
++ }
++ return val
++}
++
++func (b *branchImpl) raw() map[core.GroupKind]groupKind {
++ if b.m == nil {
++ b.m = make(map[core.GroupKind]groupKind)
++ }
++ return b.m
++}
++
++type groupKindImpl struct {
++ m map[string]namespace
++}
++
++func (g *groupKindImpl) namespace(ns string) namespace {
++ if g.m == nil {
++ g.m = make(map[string]namespace)
++ }
++ val, ok := g.m[ns]
++ if !ok {
++ val = &namespaceImpl{}
++ g.m[ns] = val
++ }
++ return val
++}
++
++func (g *groupKindImpl) raw() map[string]namespace {
++ if g.m == nil {
++ g.m = make(map[string]namespace)
++ }
++ return g.m
++}
++
++type namespaceImpl struct {
++ m map[string]ChecksumPath
++}
++
++func (n *namespaceImpl) name(name string) (ChecksumPath, bool) {
++ if n.m == nil {
++ n.m = make(map[string]ChecksumPath)
++ }
++ cp, ok := n.m[name]
++ return cp, ok
++}
++
++func (n *namespaceImpl) setName(name string, cp ChecksumPath) {
++ if n.m == nil {
++ n.m = make(map[string]ChecksumPath)
++ }
++ n.m[name] = cp
++}
++
++func (n *namespaceImpl) deleteName(name string) {
++ if n.m == nil {
++ n.m = make(map[string]ChecksumPath)
++ }
++ delete(n.m, name)
++}
++
++func (n *namespaceImpl) raw() map[string]ChecksumPath {
++ if n.m == nil {
++ n.m = make(map[string]ChecksumPath)
++ }
++ return n.m
++}
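++
++// Design note: the three lazily-initialized map levels above mirror the
++// lookup order GroupKind -> namespace -> name, so that the callers in
++// filefinder_mapped.go can chain groupKind().namespace().name() (and the
++// set/delete variants) without any nil checks at the call sites.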
+diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go
+new file mode 100644
+index 0000000..9109734
+--- /dev/null
++++ b/pkg/storage/filesystem/unstructured/storage.go
+@@ -0,0 +1,120 @@
++package unstructured
++
++import (
++ "context"
++ "errors"
++ "fmt"
++
++ "github.com/sirupsen/logrus"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
++)
++
++func NewGeneric(storage filesystem.Storage, recognizer core.ObjectRecognizer, pathExcluder filesystem.PathExcluder) (Storage, error) {
++ if storage == nil {
++ return nil, fmt.Errorf("storage is mandatory")
++ }
++ if recognizer == nil {
++ return nil, fmt.Errorf("recognizer is mandatory")
++ }
++ mappedFileFinder, ok := storage.FileFinder().(MappedFileFinder)
++ if !ok {
++ return nil, errors.New("the given filesystem.Storage must use a MappedFileFinder")
++ }
++ return &Generic{
++ Storage: storage,
++ recognizer: recognizer,
++ mappedFileFinder: mappedFileFinder,
++ pathExcluder: pathExcluder,
++ }, nil
++}
++
++type Generic struct {
++ filesystem.Storage
++ recognizer core.ObjectRecognizer
++ mappedFileFinder MappedFileFinder
++ pathExcluder filesystem.PathExcluder
++}
++
++// Sync synchronizes the current state of the filesystem with the
++// cached mappings in the MappedFileFinder.
++func (s *Generic) Sync(ctx context.Context) ([]ChecksumPathID, error) {
++ fileFinder := s.MappedFileFinder()
++
++ // List all valid files in the fs
++ files, err := filesystem.ListValidFilesInFilesystem(
++ ctx,
++ fileFinder.Filesystem(),
++ fileFinder.ContentTyper(),
++ s.PathExcluder(),
++ )
++ if err != nil {
++ return nil, err
++ }
++
++ // Record the state of all files (and fill the mappings of the
++ // MappedFileFinder) so that SYNC events can be sent before changes start being monitored
++ updatedFiles := make([]ChecksumPathID, 0, len(files))
++ for _, filePath := range files {
++ // Get the current checksum of the file
++ currentChecksum, err := fileFinder.Filesystem().Checksum(ctx, filePath)
++ if err != nil {
++ logrus.Errorf("Could not get checksum for file %q: %v", filePath, err)
++ continue
++ }
++
++ // If the given file is already tracked, i.e. has a mapping with a
++ // non-empty checksum, and the current checksum matches, we do not
++ // need to do anything.
++ if id, err := fileFinder.ObjectAt(ctx, filePath); err == nil {
++ if cp, ok := fileFinder.GetMapping(ctx, id); ok && len(cp.Checksum) != 0 {
++ if cp.Checksum == currentChecksum {
++ logrus.Tracef("Checksum for file %q is up-to-date: %q, skipping...", filePath, cp.Checksum)
++ continue
++ }
++ }
++ }
++
++ // If the file is not known to the FileFinder yet, or if the checksum
++ // was empty, read the file, and recognize it.
++ content, err := s.FileFinder().Filesystem().ReadFile(ctx, filePath)
++ if err != nil {
++ logrus.Warnf("Ignoring %q: %v", filePath, err)
++ continue
++ }
++
++ id, err := s.recognizer.ResolveObjectID(ctx, filePath, content)
++ if err != nil {
++ logrus.Warnf("Could not recognize object ID in %q: %v", filePath, err)
++ continue
++ }
++
++ // Add a mapping between this object and path
++ cp := ChecksumPath{
++ Checksum: currentChecksum,
++ Path: filePath,
++ }
++ s.MappedFileFinder().SetMapping(ctx, id, cp)
++ // Add to the slice which we'll return
++ updatedFiles = append(updatedFiles, ChecksumPathID{
++ ChecksumPath: cp,
++ ID: id,
++ })
++ }
++ return updatedFiles, nil
++}
++
++// ObjectRecognizer returns the underlying ObjectRecognizer used.
++func (s *Generic) ObjectRecognizer() core.ObjectRecognizer {
++ return s.recognizer
++}
++
++// PathExcluder specifies what paths to not sync
++func (s *Generic) PathExcluder() filesystem.PathExcluder {
++ return s.pathExcluder
++}
++
++// MappedFileFinder returns the underlying MappedFileFinder used.
++func (s *Generic) MappedFileFinder() MappedFileFinder {
++ return s.mappedFileFinder
++}
+diff --git a/pkg/storage/format.go b/pkg/storage/format.go
+deleted file mode 100644
+index 84993ce..0000000
+--- a/pkg/storage/format.go
++++ /dev/null
+@@ -1,20 +0,0 @@
+-package storage
+-
+-import "github.com/weaveworks/libgitops/pkg/serializer"
+-
+-// ContentTypes describes the connection between
+-// file extensions and a content types.
+-var ContentTypes = map[string]serializer.ContentType{
+- ".json": serializer.ContentTypeJSON,
+- ".yaml": serializer.ContentTypeYAML,
+- ".yml": serializer.ContentTypeYAML,
+-}
+-
+-func extForContentType(wanted serializer.ContentType) string {
+- for ext, ct := range ContentTypes {
+- if ct == wanted {
+- return ext
+- }
+- }
+- return ""
+-}
+diff --git a/pkg/storage/interfaces.go b/pkg/storage/interfaces.go
+new file mode 100644
+index 0000000..c5698e0
+--- /dev/null
++++ b/pkg/storage/interfaces.go
+@@ -0,0 +1,103 @@
++package storage
++
++import (
++ "context"
++ "errors"
++
++ "github.com/weaveworks/libgitops/pkg/serializer"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "k8s.io/apimachinery/pkg/util/sets"
++)
++
++var (
++ // ErrNamespacedMismatch is returned by Storage methods if the given UnversionedObjectID
++ // carries invalid data, according to the Namespacer.
++ ErrNamespacedMismatch = errors.New("mismatch between namespacing info for object and the given parameter")
++)
++
++// Storage is a Key-indexed low-level interface to
++// store byte-encoded Objects (resources) in non-volatile
++// memory.
++//
++// This Storage operates entirely on GroupKinds, without enforcing
++// a specific version of the encoded data format. This is possible
++// under the assumption that any older format stored on disk can be
++// read successfully and converted into a more recent version.
++//
++// TODO: Add thread-safety so it is not possible to issue a Write() or Delete()
++// at the same time as any other read operation.
++type Storage interface {
++ Reader
++ Writer
++}
++
++// StorageCommon is an interface that contains the methods needed
++// by both Reader and Writer.
++type StorageCommon interface {
++ // Namespacer gives access to the namespacer that is used
++ Namespacer() core.Namespacer
++ // Exists checks if the resource indicated by the ID exists.
++ Exists(ctx context.Context, id core.UnversionedObjectID) bool
++}
++
++// Reader provides the read operations for the Storage.
++type Reader interface {
++ StorageCommon
++
++ // Read returns a resource's content based on the ID.
++ // If the resource does not exist, it returns core.NewErrNotFound.
++ Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error)
++
++ // Checksum returns a checksum of the Object with the given ID.
++ //
++ // What the checksum represents is application-dependent; however, it
++ // should be the same for two invocations, as long as the stored
++ // data is the same. It might change over time although the
++ // underlying data did not. Examples of checksums that can be
++ // used are: the file modification timestamp, a sha256sum of the
++ // file content, or the latest Git commit when the file was
++ // changed.
++ Checksum(ctx context.Context, id core.UnversionedObjectID) (string, error)
++
++ // ContentType returns the content type that should be used when serializing
++ // the object with the given ID. This operation must function also before the
++ // Object with the given id exists in the system, in order to be able to
++ // create new Objects.
++ ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error)
++
++ // List operations
++ Lister
++}
++
++type Lister interface {
++ // ListNamespaces lists the available namespaces for the given GroupKind.
++ // This function shall only be called for namespaced objects, it is up to
++ // the caller to make sure they do not call this method for root-spaced
++ // objects. If any of the given rules are violated, ErrNamespacedMismatch
++ // should be returned as a wrapped error.
++ //
++ // The implementer can choose between basing the answer strictly on e.g.
++ // v1.Namespace objects that exist in the system, or just the set of
++ // different namespaces that have been set on any object belonging to
++ // the given GroupKind.
++ ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error)
++
++ // ListObjectIDs returns a list of unversioned ObjectIDs.
++ // For namespaced GroupKinds, the caller must provide a namespace, and for
++ // root-spaced GroupKinds, the caller must not. When namespaced, this function
++ // must only return object IDs for that given namespace. If any of the given
++ // rules are violated, ErrNamespacedMismatch should be returned as a wrapped error.
++ ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error)
++}
++
++// Writer provides the write operations for the Storage.
++type Writer interface {
++ StorageCommon
++
++ // Write writes the given content to the resource indicated by the ID.
++ // Error returns are implementation-specific.
++ Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error
++ // Delete deletes the resource indicated by the ID.
++ // If the resource does not exist, it returns ErrNotFound.
++ Delete(ctx context.Context, id core.UnversionedObjectID) error
++}
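++
++// A hedged read-modify-write sketch against these interfaces (the id value
++// is a placeholder for a core.UnversionedObjectID the caller has):
++//
++//   content, err := s.Read(ctx, id)
++//   // ... mutate content ...
++//   err = s.Write(ctx, id, content)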
+diff --git a/pkg/storage/key.go b/pkg/storage/key.go
+deleted file mode 100644
+index 015cac4..0000000
+--- a/pkg/storage/key.go
++++ /dev/null
+@@ -1,64 +0,0 @@
+-package storage
+-
+-import (
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "k8s.io/apimachinery/pkg/runtime/schema"
+-)
+-
+-type kindKey schema.GroupVersionKind
+-
+-func (gvk kindKey) GetGroup() string { return gvk.Group }
+-func (gvk kindKey) GetVersion() string { return gvk.Version }
+-func (gvk kindKey) GetKind() string { return gvk.Kind }
+-func (gvk kindKey) GetGVK() schema.GroupVersionKind { return schema.GroupVersionKind(gvk) }
+-func (gvk kindKey) EqualsGVK(kind KindKey, respectVersion bool) bool {
+- // Make sure kind and group match, otherwise return false
+- if gvk.GetKind() != kind.GetKind() || gvk.GetGroup() != kind.GetGroup() {
+- return false
+- }
+- // If we allow version mismatches (i.e. don't need to respect the version), return true
+- if !respectVersion {
+- return true
+- }
+- // Otherwise, return true if the version also is the same
+- return gvk.GetVersion() == kind.GetVersion()
+-}
+-func (gvk kindKey) String() string { return gvk.GetGVK().String() }
+-
+-// kindKey implements KindKey.
+-var _ KindKey = kindKey{}
+-
+-type KindKey interface {
+- // String implements fmt.Stringer
+- String() string
+-
+- GetGroup() string
+- GetVersion() string
+- GetKind() string
+- GetGVK() schema.GroupVersionKind
+-
+- EqualsGVK(kind KindKey, respectVersion bool) bool
+-}
+-
+-type ObjectKey interface {
+- KindKey
+- runtime.Identifyable
+-}
+-
+-// objectKey implements ObjectKey.
+-var _ ObjectKey = &objectKey{}
+-
+-type objectKey struct {
+- KindKey
+- runtime.Identifyable
+-}
+-
+-func (key objectKey) String() string { return key.KindKey.String() + " " + key.GetIdentifier() }
+-
+-func NewKindKey(gvk schema.GroupVersionKind) KindKey {
+- return kindKey(gvk)
+-}
+-
+-func NewObjectKey(kind KindKey, id runtime.Identifyable) ObjectKey {
+- return objectKey{kind, id}
+-}
+diff --git a/pkg/storage/kube/namespaces.go b/pkg/storage/kube/namespaces.go
+new file mode 100644
+index 0000000..3e509ce
+--- /dev/null
++++ b/pkg/storage/kube/namespaces.go
+@@ -0,0 +1,111 @@
++package kube
++
++import (
++ "sync"
++
++ "github.com/weaveworks/libgitops/pkg/storage/backend"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "k8s.io/apimachinery/pkg/api/meta"
++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
++ "k8s.io/apimachinery/pkg/runtime/schema"
++)
++
++// TODO: Make an example component that iterates through all of a raw.Storage's
++// or FileFinder's objects, reads them, and converts them into the current
++// hub version.
++
++// TODO: Make a composite Storage that encrypts secrets using a key
++
++// NewNamespaceEnforcer returns a backend.NamespaceEnforcer that
++// enforces namespacing rules (approximately) in the same way as
++// Kubernetes itself does. The following rules are applied:
++//
++// if object is namespaced {
++// if .metadata.namespace == "" {
++// .metadata.namespace = "default"
++// } else { // .metadata.namespace != ""
++// Make sure that such a v1.Namespace object
++// exists in the system.
++// }
++// } else { // object is non-namespaced
++// if .metadata.namespace != "" {
++// .metadata.namespace = ""
++// }
++// }
++//
++// Underneath, backend.GenericNamespaceEnforcer is used. Refer
++// to the documentation of that if you want the functionality
++// to be slightly different. (e.g. any namespace value is valid).
++//
++// TODO: Maybe we want to validate the namespace string itself?
++func NewNamespaceEnforcer() backend.NamespaceEnforcer {
++ return backend.GenericNamespaceEnforcer{
++ DefaultNamespace: metav1.NamespaceDefault,
++ NamespaceGroupKind: &core.GroupKind{
++ Group: "", // legacy name for the core API group
++ Kind: "Namespace",
++ },
++ }
++}
++
++// SimpleRESTMapper is a subset of the meta.RESTMapper interface
++type SimpleRESTMapper interface {
++ // RESTMapping identifies a preferred resource mapping for the provided group kind.
++ RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error)
++}
++
++// RESTMapperToNamespacer implements the Namespacer interface by fetching (and caching) data
++// from the given SimpleRESTMapper, which is compatible with any meta.RESTMapper implementation.
++// This allows you to e.g. pass in a meta.RESTMapper yielded from
++// sigs.k8s.io/controller-runtime/pkg/client/apiutil.NewDiscoveryRESTMapper(c *rest.Config), or
++// k8s.io/client-go/restmapper.NewDiscoveryRESTMapper(groups []*restmapper.APIGroupResources)
++// in order to look up namespacing information from either a running API server, or statically, from
++// the list of restmapper.APIGroupResources.
++func RESTMapperToNamespacer(mapper SimpleRESTMapper) core.Namespacer {
++ return &restNamespacer{
++ mapper: mapper,
++ mappingByType: make(map[schema.GroupKind]*meta.RESTMapping),
++ mu: &sync.RWMutex{},
++ }
++}
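++
++// A hedged usage sketch; apiutil.NewDiscoveryRESTMapper is the
++// controller-runtime helper referenced in the doc comment above:
++//
++//   mapper, err := apiutil.NewDiscoveryRESTMapper(cfg)
++//   namespacer := RESTMapperToNamespacer(mapper)
++//   ok, err := namespacer.IsNamespaced(schema.GroupKind{Group: "apps", Kind: "Deployment"}) // true, nil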
++
++var _ core.Namespacer = &restNamespacer{}
++
++type restNamespacer struct {
++ mapper SimpleRESTMapper
++
++ mappingByType map[schema.GroupKind]*meta.RESTMapping
++ mu *sync.RWMutex
++}
++
++func (n *restNamespacer) IsNamespaced(gk schema.GroupKind) (bool, error) {
++ m, err := n.getMapping(gk)
++ if err != nil {
++ return false, err
++ }
++ return mappingNamespaced(m), nil
++}
++
++func (n *restNamespacer) getMapping(gk schema.GroupKind) (*meta.RESTMapping, error) {
++ n.mu.RLock()
++ mapping, ok := n.mappingByType[gk]
++ n.mu.RUnlock()
++ // If already cached, we're ok
++ if ok {
++ return mapping, nil
++ }
++
++ // Write the mapping info to our cache
++ n.mu.Lock()
++ defer n.mu.Unlock()
++ m, err := n.mapper.RESTMapping(gk)
++ if err != nil {
++ return nil, err
++ }
++ n.mappingByType[gk] = m
++ return m, nil
++}
++
++func mappingNamespaced(mapping *meta.RESTMapping) bool {
++ return mapping.Scope.Name() == meta.RESTScopeNameNamespace
++}
+diff --git a/pkg/storage/mappedrawstorage.go b/pkg/storage/mappedrawstorage.go
+deleted file mode 100644
+index d41641c..0000000
+--- a/pkg/storage/mappedrawstorage.go
++++ /dev/null
+@@ -1,177 +0,0 @@
+-package storage
+-
+-import (
+- "fmt"
+- "io/ioutil"
+- "os"
+- "path/filepath"
+- "sync"
+-
+- log "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/serializer"
+- "github.com/weaveworks/libgitops/pkg/util"
+-)
+-
+-var (
+- // ErrNotTracked is returned when the requested resource wasn't found.
+- ErrNotTracked = fmt.Errorf("untracked object: %w", ErrNotFound)
+-)
+-
+-// MappedRawStorage is an interface for RawStorages which store their
+-// data in a flat/unordered directory format like manifest directories.
+-type MappedRawStorage interface {
+- RawStorage
+-
+- // AddMapping binds a Key's virtual path to a physical file path
+- AddMapping(key ObjectKey, path string)
+- // RemoveMapping removes the physical file
+- // path mapping matching the given Key
+- RemoveMapping(key ObjectKey)
+-
+- // SetMappings overwrites all known mappings
+- SetMappings(m map[ObjectKey]string)
+-}
+-
+-func NewGenericMappedRawStorage(dir string) MappedRawStorage {
+- return &GenericMappedRawStorage{
+- dir: dir,
+- fileMappings: make(map[ObjectKey]string),
+- mux: &sync.Mutex{},
+- }
+-}
+-
+-// GenericMappedRawStorage is the default implementation of a MappedRawStorage,
+-// it stores files in the given directory via a path translation map.
+-type GenericMappedRawStorage struct {
+- dir string
+- fileMappings map[ObjectKey]string
+- mux *sync.Mutex
+-}
+-
+-func (r *GenericMappedRawStorage) realPath(key ObjectKey) (string, error) {
+- r.mux.Lock()
+- path, ok := r.fileMappings[key]
+- r.mux.Unlock()
+- if !ok {
+- return "", fmt.Errorf("GenericMappedRawStorage: cannot resolve %q: %w", key, ErrNotTracked)
+- }
+-
+- return path, nil
+-}
+-
+-// If the file doesn't exist, returns ErrNotFound + ErrNotTracked.
+-func (r *GenericMappedRawStorage) Read(key ObjectKey) ([]byte, error) {
+- file, err := r.realPath(key)
+- if err != nil {
+- return nil, err
+- }
+-
+- return ioutil.ReadFile(file)
+-}
+-
+-func (r *GenericMappedRawStorage) Exists(key ObjectKey) bool {
+- file, err := r.realPath(key)
+- if err != nil {
+- return false
+- }
+-
+- return util.FileExists(file)
+-}
+-
+-func (r *GenericMappedRawStorage) Write(key ObjectKey, content []byte) error {
+- // GenericMappedRawStorage isn't going to generate files itself,
+- // only write if the file is already known
+- file, err := r.realPath(key)
+- if err != nil {
+- return err
+- }
+-
+- return ioutil.WriteFile(file, content, 0644)
+-}
+-
+-// If the file doesn't exist, returns ErrNotFound + ErrNotTracked.
+-func (r *GenericMappedRawStorage) Delete(key ObjectKey) (err error) {
+- file, err := r.realPath(key)
+- if err != nil {
+- return
+- }
+-
+- // GenericMappedRawStorage files can be deleted
+- // externally, check that the file exists first
+- if util.FileExists(file) {
+- err = os.Remove(file)
+- }
+-
+- if err == nil {
+- r.RemoveMapping(key)
+- }
+-
+- return
+-}
+-
+-func (r *GenericMappedRawStorage) List(kind KindKey) ([]ObjectKey, error) {
+- result := make([]ObjectKey, 0)
+-
+- for key := range r.fileMappings {
+- // Include objects with the same kind and group, ignore version mismatches
+- if key.EqualsGVK(kind, false) {
+- result = append(result, key)
+- }
+- }
+-
+- return result, nil
+-}
+-
+-// This returns the modification time as a UnixNano string.
+-// If the file doesn't exist, returns ErrNotFound + ErrNotTracked.
+-func (r *GenericMappedRawStorage) Checksum(key ObjectKey) (string, error) {
+- path, err := r.realPath(key)
+- if err != nil {
+- return "", err
+- }
+-
+- return checksumFromModTime(path)
+-}
+-
+-func (r *GenericMappedRawStorage) ContentType(key ObjectKey) (ct serializer.ContentType) {
+- if file, err := r.realPath(key); err == nil {
+- ct = ContentTypes[filepath.Ext(file)] // Retrieve the correct format based on the extension
+- }
+-
+- return
+-}
+-
+-func (r *GenericMappedRawStorage) WatchDir() string {
+- return r.dir
+-}
+-
+-func (r *GenericMappedRawStorage) GetKey(path string) (ObjectKey, error) {
+- for key, p := range r.fileMappings {
+- if p == path {
+- return key, nil
+- }
+- }
+-
+- return objectKey{}, fmt.Errorf("no mapping found for path %q", path)
+-}
+-
+-func (r *GenericMappedRawStorage) AddMapping(key ObjectKey, path string) {
+- log.Debugf("GenericMappedRawStorage: AddMapping: %q -> %q", key, path)
+- r.mux.Lock()
+- r.fileMappings[key] = path
+- r.mux.Unlock()
+-}
+-
+-func (r *GenericMappedRawStorage) RemoveMapping(key ObjectKey) {
+- log.Debugf("GenericMappedRawStorage: RemoveMapping: %q", key)
+- r.mux.Lock()
+- delete(r.fileMappings, key)
+- r.mux.Unlock()
+-}
+-
+-func (r *GenericMappedRawStorage) SetMappings(m map[ObjectKey]string) {
+- log.Debugf("GenericMappedRawStorage: SetMappings: %v", m)
+- r.mux.Lock()
+- r.fileMappings = m
+- r.mux.Unlock()
+-}
+diff --git a/pkg/storage/rawstorage.go b/pkg/storage/rawstorage.go
+deleted file mode 100644
+index 9330433..0000000
+--- a/pkg/storage/rawstorage.go
++++ /dev/null
+@@ -1,217 +0,0 @@
+-package storage
+-
+-import (
+- "fmt"
+- "io/ioutil"
+- "os"
+- "path"
+- "path/filepath"
+- "strconv"
+- "strings"
+-
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/serializer"
+- "github.com/weaveworks/libgitops/pkg/util"
+- "k8s.io/apimachinery/pkg/runtime/schema"
+-)
+-
+-// RawStorage is a Key-indexed low-level interface to
+-// store byte-encoded Objects (resources) in non-volatile
+-// memory.
+-type RawStorage interface {
+- // Read returns a resource's content based on key.
+- // If the resource does not exist, it returns ErrNotFound.
+- Read(key ObjectKey) ([]byte, error)
+- // Exists checks if the resource indicated by key exists.
+- Exists(key ObjectKey) bool
+- // Write writes the given content to the resource indicated by key.
+- // Error returns are implementation-specific.
+- Write(key ObjectKey, content []byte) error
+- // Delete deletes the resource indicated by key.
+- // If the resource does not exist, it returns ErrNotFound.
+- Delete(key ObjectKey) error
+- // List returns all matching object keys based on the given KindKey.
+- List(key KindKey) ([]ObjectKey, error)
+- // Checksum returns a string checksum for the resource indicated by key.
+- // If the resource does not exist, it returns ErrNotFound.
+- Checksum(key ObjectKey) (string, error)
+- // ContentType returns the content type of the contents of the resource indicated by key.
+- ContentType(key ObjectKey) serializer.ContentType
+-
+- // WatchDir returns the path for Watchers to watch changes in.
+- WatchDir() string
+- // GetKey retrieves the Key containing the virtual path based
+- // on the given physical file path returned by a Watcher.
+- GetKey(path string) (ObjectKey, error)
+-}
+-
+-func NewGenericRawStorage(dir string, gv schema.GroupVersion, ct serializer.ContentType) RawStorage {
+- ext := extForContentType(ct)
+- if ext == "" {
+- panic("Invalid content type")
+- }
+- return &GenericRawStorage{
+- dir: dir,
+- gv: gv,
+- ct: ct,
+- ext: ext,
+- }
+-}
+-
+-// GenericRawStorage is a rawstorage which stores objects as JSON files on disk,
+-// in the form: ///metadata.json.
+-// The GenericRawStorage only supports one GroupVersion at a time, and will error if given
+-// any other resources
+-type GenericRawStorage struct {
+- dir string
+- gv schema.GroupVersion
+- ct serializer.ContentType
+- ext string
+-}
+-
+-func (r *GenericRawStorage) keyPath(key ObjectKey) string {
+- return path.Join(r.dir, key.GetKind(), key.GetIdentifier(), fmt.Sprintf("metadata%s", r.ext))
+-}
+-
+-func (r *GenericRawStorage) kindKeyPath(kindKey KindKey) string {
+- return path.Join(r.dir, kindKey.GetKind())
+-}
+-
+-func (r *GenericRawStorage) validateGroupVersion(kind KindKey) error {
+- if r.gv.Group == kind.GetGroup() && r.gv.Version == kind.GetVersion() {
+- return nil
+- }
+-
+- return fmt.Errorf("GroupVersion %s/%s not supported by this GenericRawStorage", kind.GetGroup(), kind.GetVersion())
+-}
+-
+-func (r *GenericRawStorage) Read(key ObjectKey) ([]byte, error) {
+- // Validate GroupVersion first
+- if err := r.validateGroupVersion(key); err != nil {
+- return nil, err
+- }
+-
+- // Check if the resource indicated by key exists
+- if !r.Exists(key) {
+- return nil, ErrNotFound
+- }
+-
+- return ioutil.ReadFile(r.keyPath(key))
+-}
+-
+-func (r *GenericRawStorage) Exists(key ObjectKey) bool {
+- // Validate GroupVersion first
+- if err := r.validateGroupVersion(key); err != nil {
+- return false
+- }
+-
+- return util.FileExists(r.keyPath(key))
+-}
+-
+-func (r *GenericRawStorage) Write(key ObjectKey, content []byte) error {
+- // Validate GroupVersion first
+- if err := r.validateGroupVersion(key); err != nil {
+- return err
+- }
+-
+- file := r.keyPath(key)
+-
+- // Create the underlying directories if they do not exist already
+- if !r.Exists(key) {
+- if err := os.MkdirAll(path.Dir(file), 0755); err != nil {
+- return err
+- }
+- }
+-
+- return ioutil.WriteFile(file, content, 0644)
+-}
+-
+-func (r *GenericRawStorage) Delete(key ObjectKey) error {
+- // Validate GroupVersion first
+- if err := r.validateGroupVersion(key); err != nil {
+- return err
+- }
+-
+- // Check if the resource indicated by key exists
+- if !r.Exists(key) {
+- return ErrNotFound
+- }
+-
+- return os.RemoveAll(path.Dir(r.keyPath(key)))
+-}
+-
+-func (r *GenericRawStorage) List(kind KindKey) ([]ObjectKey, error) {
+- // Validate GroupVersion first
+- if err := r.validateGroupVersion(kind); err != nil {
+- return nil, err
+- }
+-
+- entries, err := ioutil.ReadDir(r.kindKeyPath(kind))
+- if err != nil {
+- return nil, err
+- }
+-
+- result := make([]ObjectKey, 0, len(entries))
+- for _, entry := range entries {
+- result = append(result, NewObjectKey(kind, runtime.NewIdentifier(entry.Name())))
+- }
+-
+- return result, nil
+-}
+-
+-// Checksum returns the resource's modification time as a UnixNano string.
+-// If the file doesn't exist, ErrNotFound is returned
+-func (r *GenericRawStorage) Checksum(key ObjectKey) (string, error) {
+- // Validate GroupVersion first
+- if err := r.validateGroupVersion(key); err != nil {
+- return "", err
+- }
+-
+- // Check if the resource indicated by key exists
+- if !r.Exists(key) {
+- return "", ErrNotFound
+- }
+-
+- return checksumFromModTime(r.keyPath(key))
+-}
+-
+-func (r *GenericRawStorage) ContentType(_ ObjectKey) serializer.ContentType {
+- return r.ct
+-}
+-
+-func (r *GenericRawStorage) WatchDir() string {
+- return r.dir
+-}
+-
+-func (r *GenericRawStorage) GetKey(p string) (ObjectKey, error) {
+- splitDir := strings.Split(filepath.Clean(r.dir), string(os.PathSeparator))
+- splitPath := strings.Split(filepath.Clean(p), string(os.PathSeparator))
+-
+- if len(splitPath) < len(splitDir)+2 {
+- return nil, fmt.Errorf("path not long enough: %s", p)
+- }
+-
+- for i := 0; i < len(splitDir); i++ {
+- if splitDir[i] != splitPath[i] {
+- return nil, fmt.Errorf("path has wrong base: %s", p)
+- }
+- }
+- kind := splitPath[len(splitDir)]
+- uid := splitPath[len(splitDir)+1]
+- gvk := schema.GroupVersionKind{
+- Group: r.gv.Group,
+- Version: r.gv.Version,
+- Kind: kind,
+- }
+-
+- return NewObjectKey(NewKindKey(gvk), runtime.NewIdentifier(uid)), nil
+-}
+-
+-func checksumFromModTime(path string) (string, error) {
+- fi, err := os.Stat(path)
+- if err != nil {
+- return "", err
+- }
+-
+- return strconv.FormatInt(fi.ModTime().UnixNano(), 10), nil
+-}
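For reference, the on-disk scheme GenericRawStorage uses is easy to compute by hand. A minimal, runnable sketch of the <dir>/<kind>/<identifier>/metadata.<ext> layout described above (objectKey is a simplified stand-in for the real ObjectKey):

package main

import (
	"fmt"
	"path"
)

type objectKey struct {
	Kind, Identifier string
}

// keyPath mirrors GenericRawStorage.keyPath: dir/kind/identifier/metadata<ext>
func keyPath(dir, ext string, key objectKey) string {
	return path.Join(dir, key.Kind, key.Identifier, "metadata"+ext)
}

func main() {
	key := objectKey{Kind: "Car", Identifier: "599615df99e01399"}
	fmt.Println(keyPath("/var/lib/sample", ".json", key))
	// Output: /var/lib/sample/Car/599615df99e01399/metadata.json
}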
+diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go
+deleted file mode 100644
+index 4d94232..0000000
+--- a/pkg/storage/storage.go
++++ /dev/null
+@@ -1,454 +0,0 @@
+-package storage
+-
+-import (
+- "bytes"
+- "errors"
+- "fmt"
+- "io"
+-
+- "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/filter"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/serializer"
+- patchutil "github.com/weaveworks/libgitops/pkg/util/patch"
+- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+- kruntime "k8s.io/apimachinery/pkg/runtime"
+- "k8s.io/apimachinery/pkg/runtime/schema"
+-)
+-
+-var (
+-	// ErrAmbiguousFind is returned when the user requested one object from a List+Filter process.
+-	ErrAmbiguousFind = errors.New("two or more results were acquired when one was expected")
+-	// ErrNotFound is returned when the requested resource wasn't found.
+-	ErrNotFound = errors.New("resource not found")
+-	// ErrAlreadyExists is returned when WriteStorage.Create is called for an already stored object.
+-	ErrAlreadyExists = errors.New("resource already exists")
+-)
+-
+-type ReadStorage interface {
+- // Get returns a new Object for the resource at the specified kind/uid path, based on the file content.
+- // If the resource referred to by the given ObjectKey does not exist, Get returns ErrNotFound.
+- Get(key ObjectKey) (runtime.Object, error)
+-
+- // List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package
+- // for more information, e.g. filter.NameFilter{} and filter.UIDFilter{})
+- List(kind KindKey, opts ...filter.ListOption) ([]runtime.Object, error)
+-
+- // Find does a List underneath, also using filters, but always returns one object. If the List
+- // underneath returned two or more results, ErrAmbiguousFind is returned. If no match was found,
+- // ErrNotFound is returned.
+- Find(kind KindKey, opts ...filter.ListOption) (runtime.Object, error)
+-
+- //
+- // Partial object getters.
+- // TODO: Figure out what we should do with these, do we need them and if so where?
+- //
+-
+- // GetMeta returns a new Object's APIType representation for the resource at the specified kind/uid path.
+- // If the resource referred to by the given ObjectKey does not exist, GetMeta returns ErrNotFound.
+- GetMeta(key ObjectKey) (runtime.PartialObject, error)
+- // ListMeta lists all Objects' APIType representation. In other words,
+- // only metadata about each Object is unmarshalled (uid/name/kind/apiVersion).
+- // This allows for faster runs (no need to unmarshal "the world"), and less
+- // resource usage, when only metadata is unmarshalled into memory
+- ListMeta(kind KindKey) ([]runtime.PartialObject, error)
+-
+- //
+- // Cache-related methods.
+- //
+-
+- // Checksum returns a string representing the state of an Object on disk
+- // The checksum should change if any modifications have been made to the
+- // Object on disk, it can be e.g. the Object's modification timestamp or
+- // calculated checksum. If the Object is not found, ErrNotFound is returned.
+- Checksum(key ObjectKey) (string, error)
+-	// Count returns the number of available Objects of a specific kind.
+-	// This is used by Caches to check whether all Objects are cached before performing a List
+- Count(kind KindKey) (uint64, error)
+-
+- //
+- // Access to underlying Resources.
+- //
+-
+- // RawStorage returns the RawStorage instance backing this Storage
+- RawStorage() RawStorage
+- // Serializer returns the serializer
+- Serializer() serializer.Serializer
+-
+- //
+- // Misc methods.
+- //
+-
+- // ObjectKeyFor returns the ObjectKey for the given object
+- ObjectKeyFor(obj runtime.Object) (ObjectKey, error)
+- // Close closes all underlying resources (e.g. goroutines) used; before the application exits
+- Close() error
+-}
+-
+-type WriteStorage interface {
+- // Create creates an entry for and stores the given Object in the storage. The Object must be new to the storage.
+- // The ObjectMeta.CreationTimestamp field is set automatically to the current time if it is unset.
+- Create(obj runtime.Object) error
+- // Update updates the state of the given Object in the storage. The Object must exist in the storage.
+- // The ObjectMeta.CreationTimestamp field is set automatically to the current time if it is unset.
+- Update(obj runtime.Object) error
+-
+- // Patch performs a strategic merge patch on the Object with the given UID, using the byte-encoded patch given
+- Patch(key ObjectKey, patch []byte) error
+- // Delete removes an Object from the storage
+- Delete(key ObjectKey) error
+-}
+-
+-// Storage is an interface for persisting and retrieving API objects to/from a backend
+-// One Storage instance handles all different Kinds of Objects
+-type Storage interface {
+- ReadStorage
+- WriteStorage
+-}
+-
+-// NewGenericStorage constructs a new Storage
+-func NewGenericStorage(rawStorage RawStorage, serializer serializer.Serializer, identifiers []runtime.IdentifierFactory) Storage {
+- return &GenericStorage{rawStorage, serializer, patchutil.NewPatcher(serializer), identifiers}
+-}
+-
+-// GenericStorage implements the Storage interface
+-type GenericStorage struct {
+- raw RawStorage
+- serializer serializer.Serializer
+- patcher patchutil.Patcher
+- identifiers []runtime.IdentifierFactory
+-}
+-
+-var _ Storage = &GenericStorage{}
+-
+-func (s *GenericStorage) Serializer() serializer.Serializer {
+- return s.serializer
+-}
+-
+-// Get returns a new Object for the resource at the specified kind/uid path, based on the file content
+-func (s *GenericStorage) Get(key ObjectKey) (runtime.Object, error) {
+- content, err := s.raw.Read(key)
+- if err != nil {
+- return nil, err
+- }
+-
+- return s.decode(key, content)
+-}
+-
+-// TODO: Verify this works
+-// GetMeta returns a new Object's APIType representation for the resource at the specified kind/uid path
+-func (s *GenericStorage) GetMeta(key ObjectKey) (runtime.PartialObject, error) {
+- content, err := s.raw.Read(key)
+- if err != nil {
+- return nil, err
+- }
+-
+- return s.decodeMeta(key, content)
+-}
+-
+-// TODO: Make sure we don't save a partial object
+-func (s *GenericStorage) write(key ObjectKey, obj runtime.Object) error {
+- // Set the content type based on the format given by the RawStorage, but default to JSON
+- contentType := serializer.ContentTypeJSON
+- if ct := s.raw.ContentType(key); len(ct) != 0 {
+- contentType = ct
+- }
+-
+- // Set creationTimestamp if not already populated
+- t := obj.GetCreationTimestamp()
+- if t.IsZero() {
+- obj.SetCreationTimestamp(metav1.Now())
+- }
+-
+- var objBytes bytes.Buffer
+- err := s.serializer.Encoder().Encode(serializer.NewFrameWriter(contentType, &objBytes), obj)
+- if err != nil {
+- return err
+- }
+-
+- return s.raw.Write(key, objBytes.Bytes())
+-}
+-
+-func (s *GenericStorage) Create(obj runtime.Object) error {
+- key, err := s.ObjectKeyFor(obj)
+- if err != nil {
+- return err
+- }
+-
+- if s.raw.Exists(key) {
+- return ErrAlreadyExists
+- }
+-
+- // The object was not found so we can safely create it
+- return s.write(key, obj)
+-}
+-
+-func (s *GenericStorage) Update(obj runtime.Object) error {
+- key, err := s.ObjectKeyFor(obj)
+- if err != nil {
+- return err
+- }
+-
+- if !s.raw.Exists(key) {
+- return ErrNotFound
+- }
+-
+- // The object was found so we can safely update it
+- return s.write(key, obj)
+-}
+-
+-// Patch performs a strategic merge patch on the object with the given UID, using the byte-encoded patch given
+-func (s *GenericStorage) Patch(key ObjectKey, patch []byte) error {
+- oldContent, err := s.raw.Read(key)
+- if err != nil {
+- return err
+- }
+-
+- newContent, err := s.patcher.Apply(oldContent, patch, key.GetGVK())
+- if err != nil {
+- return err
+- }
+-
+- return s.raw.Write(key, newContent)
+-}
+-
+-// Delete removes an Object from the storage
+-func (s *GenericStorage) Delete(key ObjectKey) error {
+- return s.raw.Delete(key)
+-}
+-
+-// Checksum returns a string representing the state of an Object on disk
+-func (s *GenericStorage) Checksum(key ObjectKey) (string, error) {
+- return s.raw.Checksum(key)
+-}
+-
+-func (s *GenericStorage) list(kind KindKey) (result []runtime.Object, walkerr error) {
+- walkerr = s.walkKind(kind, func(key ObjectKey, content []byte) error {
+- obj, err := s.decode(key, content)
+- if err != nil {
+- return err
+- }
+-
+- result = append(result, obj)
+- return nil
+- })
+- return
+-}
+-
+-// List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package
+-// for more information, e.g. filter.NameFilter{} and filter.UIDFilter{})
+-func (s *GenericStorage) List(kind KindKey, opts ...filter.ListOption) ([]runtime.Object, error) {
+- // First, complete the options struct
+- o, err := filter.MakeListOptions(opts...)
+- if err != nil {
+- return nil, err
+- }
+-
+- // Do an internal list to get all objects
+- objs, err := s.list(kind)
+- if err != nil {
+- return nil, err
+- }
+-
+- // For all list filters, pipe the output of the previous as the input to the next, in order.
+- for _, filter := range o.Filters {
+- objs, err = filter.Filter(objs...)
+- if err != nil {
+- return nil, err
+- }
+- }
+- return objs, nil
+-}
+-
+-// Find does a List underneath, also using filters, but always returns one object. If the List
+-// underneath returned two or more results, ErrAmbiguousFind is returned. If no match was found,
+-// ErrNotFound is returned.
+-func (s *GenericStorage) Find(kind KindKey, opts ...filter.ListOption) (runtime.Object, error) {
+- // Do a normal list underneath
+- objs, err := s.List(kind, opts...)
+- if err != nil {
+- return nil, err
+- }
+- // Return based on the object count
+- switch l := len(objs); l {
+- case 0:
+- return nil, fmt.Errorf("no Find match found: %w", ErrNotFound)
+- case 1:
+- return objs[0], nil
+- default:
+- return nil, fmt.Errorf("too many (%d) matches: %v: %w", l, objs, ErrAmbiguousFind)
+- }
+-}
+-
+-// ListMeta lists all Objects' APIType representation. In other words,
+-// only metadata about each Object is unmarshalled (uid/name/kind/apiVersion).
+-// This allows for faster runs (no need to unmarshal "the world"), and less
+-// resource usage, when only metadata is unmarshalled into memory
+-func (s *GenericStorage) ListMeta(kind KindKey) (result []runtime.PartialObject, walkerr error) {
+- walkerr = s.walkKind(kind, func(key ObjectKey, content []byte) error {
+-
+- obj, err := s.decodeMeta(key, content)
+- if err != nil {
+- return err
+- }
+-
+- result = append(result, obj)
+- return nil
+- })
+- return
+-}
+-
+-// Count counts the Objects for the specific kind
+-func (s *GenericStorage) Count(kind KindKey) (uint64, error) {
+- entries, err := s.raw.List(kind)
+- return uint64(len(entries)), err
+-}
+-
+-func (s *GenericStorage) ObjectKeyFor(obj runtime.Object) (ObjectKey, error) {
+- var gvk schema.GroupVersionKind
+- var err error
+-
+- _, isPartialObject := obj.(runtime.PartialObject)
+- if isPartialObject {
+- gvk = obj.GetObjectKind().GroupVersionKind()
+- // TODO: Error if empty
+- } else {
+- gvk, err = serializer.GVKForObject(s.serializer.Scheme(), obj)
+- if err != nil {
+- return nil, err
+- }
+- }
+-
+- id := s.identify(obj)
+- if id == nil {
+- return nil, fmt.Errorf("couldn't identify object")
+- }
+- return NewObjectKey(NewKindKey(gvk), id), nil
+-}
+-
+-// RawStorage returns the RawStorage instance backing this Storage
+-func (s *GenericStorage) RawStorage() RawStorage {
+- return s.raw
+-}
+-
+-// Close closes all underlying resources (e.g. goroutines) used; before the application exits
+-func (s *GenericStorage) Close() error {
+- return nil // nothing to do here for GenericStorage
+-}
+-
+-// identify loops through the identifiers, in priority order, to identify the object correctly
+-func (s *GenericStorage) identify(obj runtime.Object) runtime.Identifyable {
+- for _, identifier := range s.identifiers {
+-
+- id, ok := identifier.Identify(obj)
+- if ok {
+- return id
+- }
+- }
+- return nil
+-}
+-
+-func (s *GenericStorage) decode(key ObjectKey, content []byte) (runtime.Object, error) {
+- gvk := key.GetGVK()
+- // Decode the bytes to the internal version of the Object, if desired
+- isInternal := gvk.Version == kruntime.APIVersionInternal
+-
+- // Decode the bytes into an Object
+- ct := s.raw.ContentType(key)
+- logrus.Infof("Decoding with content type %s", ct)
+- obj, err := s.serializer.Decoder(
+- serializer.WithConvertToHubDecode(isInternal),
+- ).Decode(serializer.NewFrameReader(ct, serializer.FromBytes(content)))
+- if err != nil {
+- return nil, err
+- }
+-
+- // Cast to runtime.Object, and make sure it works
+- metaObj, ok := obj.(runtime.Object)
+- if !ok {
+- return nil, fmt.Errorf("can't convert to libgitops.runtime.Object")
+- }
+-
+- // Set the desired gvk of this Object from the caller
+- metaObj.GetObjectKind().SetGroupVersionKind(gvk)
+- return metaObj, nil
+-}
+-
+-func (s *GenericStorage) decodeMeta(key ObjectKey, content []byte) (runtime.PartialObject, error) {
+- gvk := key.GetGVK()
+- partobjs, err := DecodePartialObjects(serializer.FromBytes(content), s.serializer.Scheme(), false, &gvk)
+- if err != nil {
+- return nil, err
+- }
+-
+- return partobjs[0], nil
+-}
+-
+-func (s *GenericStorage) walkKind(kind KindKey, fn func(key ObjectKey, content []byte) error) error {
+- keys, err := s.raw.List(kind)
+- if err != nil {
+- return err
+- }
+-
+- for _, key := range keys {
+- // Allow metadata.json to not exist, although the directory does exist
+- if !s.raw.Exists(key) {
+- continue
+- }
+-
+- content, err := s.raw.Read(key)
+- if err != nil {
+- return err
+- }
+-
+- if err := fn(key, content); err != nil {
+- return err
+- }
+- }
+-
+- return nil
+-}
+-
+-// DecodePartialObjects reads any set of frames from the given ReadCloser, decodes the frames into
+-// PartialObjects, validates that the decoded objects are known to the scheme, and optionally sets
+-// a default GroupVersionKind
+-func DecodePartialObjects(rc io.ReadCloser, scheme *kruntime.Scheme, allowMultiple bool, defaultGVK *schema.GroupVersionKind) ([]runtime.PartialObject, error) {
+- fr := serializer.NewYAMLFrameReader(rc)
+-
+- frames, err := serializer.ReadFrameList(fr)
+- if err != nil {
+- return nil, err
+- }
+-
+-	// If only one frame is allowed, error out early when a different count was read
+-	if !allowMultiple && len(frames) != 1 {
+-		return nil, fmt.Errorf("DecodePartialObjects: unexpected number of frames received from ReadCloser: got %d, expected 1", len(frames))
+-	}
+-
+- objs := make([]runtime.PartialObject, 0, len(frames))
+- for _, frame := range frames {
+- partobj, err := runtime.NewPartialObject(frame)
+- if err != nil {
+- return nil, err
+- }
+-
+- gvk := partobj.GetObjectKind().GroupVersionKind()
+-
+- // Don't decode API objects unknown to the scheme (e.g. Kubernetes manifests)
+- if !scheme.Recognizes(gvk) {
+- // TODO: Typed error
+- return nil, fmt.Errorf("unknown GroupVersionKind: %s", partobj.GetObjectKind().GroupVersionKind())
+- }
+-
+-		if defaultGVK != nil {
+-			// Set the desired gvk of this Object from the caller, if defaultGVK is set.
+-			// In practice this means that although we got an external type, we might
+-			// want internal Objects later in the client. Hence, set the right
+-			// expectation here
+-			partobj.GetObjectKind().SetGroupVersionKind(*defaultGVK)
+-		}
+-
+- objs = append(objs, partobj)
+- }
+- return objs, nil
+-}
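List and Find above form a filter pipeline: each ListOption narrows the output of the previous one, and Find errors out unless exactly one object survives. A self-contained sketch of that pattern (the filter types are illustrative, not the real libgitops filter package):

package main

import (
	"errors"
	"fmt"
)

type object struct{ Name string }

// listFilter narrows a result set; filters are applied in order.
type listFilter func(objs []object) []object

func byName(name string) listFilter {
	return func(objs []object) (out []object) {
		for _, o := range objs {
			if o.Name == name {
				out = append(out, o)
			}
		}
		return
	}
}

var (
	errNotFound      = errors.New("resource not found")
	errAmbiguousFind = errors.New("two or more results were acquired when one was expected")
)

func find(objs []object, filters ...listFilter) (object, error) {
	// Pipe the output of each filter into the next, like GenericStorage.List
	for _, f := range filters {
		objs = f(objs)
	}
	// Find semantics: exactly one match, otherwise a typed error
	switch len(objs) {
	case 0:
		return object{}, errNotFound
	case 1:
		return objs[0], nil
	default:
		return object{}, errAmbiguousFind
	}
}

func main() {
	objs := []object{{"foo"}, {"bar"}}
	o, err := find(objs, byName("foo"))
	fmt.Println(o, err) // {foo} <nil>
}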
+diff --git a/pkg/storage/sync/storage.go b/pkg/storage/sync/storage.go
+deleted file mode 100644
+index 458f7fa..0000000
+--- a/pkg/storage/sync/storage.go
++++ /dev/null
+@@ -1,188 +0,0 @@
+-package sync
+-
+-/*
+-
+-TODO: Revisit if we need this file/package in the future.
+-
+-import (
+- "fmt"
+-
+- log "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/storage"
+- "github.com/weaveworks/libgitops/pkg/storage/watch"
+- "github.com/weaveworks/libgitops/pkg/storage/watch/update"
+- "github.com/weaveworks/libgitops/pkg/util/sync"
+-)
+-
+-const updateBuffer = 4096 // How many updates to buffer, 4096 should be enough for even a high update frequency
+-
+-// SyncStorage is a Storage implementation taking in multiple Storages and
+-// keeping them in sync. Any write operation executed on the SyncStorage
+-// is propagated to all of the Storages it manages (including the embedded
+-// one). For any retrieval or generation operation, the embedded Storage
+-// will be used (it is treated as read-write). As all other Storages only
+-// receive write operations, they can be thought of as write-only.
+-type SyncStorage struct {
+- storage.Storage
+- storages []storage.Storage
+- inboundStream update.UpdateStream
+- outboundStream update.UpdateStream
+- monitor *sync.Monitor
+-}
+-
+-// SyncStorage implements update.EventStorage.
+-var _ update.EventStorage = &SyncStorage{}
+-
+-// NewSyncStorage constructs a new SyncStorage
+-func NewSyncStorage(rwStorage storage.Storage, wStorages ...storage.Storage) storage.Storage {
+- ss := &SyncStorage{
+- Storage: rwStorage,
+- storages: append(wStorages, rwStorage),
+- }
+-
+- for _, s := range ss.storages {
+- if watchStorage, ok := s.(watch.WatchStorage); ok {
+-			// Populate the inbound stream if we found a WatchStorage
+- if ss.inboundStream == nil {
+- ss.inboundStream = make(update.UpdateStream, updateBuffer)
+- }
+- watchStorage.SetUpdateStream(ss.inboundStream)
+- }
+- }
+-
+- if ss.inboundStream != nil {
+- ss.monitor = sync.RunMonitor(ss.monitorFunc)
+- ss.outboundStream = make(update.UpdateStream, updateBuffer)
+- }
+-
+- return ss
+-}
+-
+-// Set is propagated to all Storages
+-func (ss *SyncStorage) Set(obj runtime.Object) error {
+- return ss.runAll(func(s storage.Storage) error {
+- return s.Set(obj)
+- })
+-}
+-
+-// Patch is propagated to all Storages
+-func (ss *SyncStorage) Patch(key storage.ObjectKey, patch []byte) error {
+- return ss.runAll(func(s storage.Storage) error {
+- return s.Patch(key, patch)
+- })
+-}
+-
+-// Delete is propagated to all Storages
+-func (ss *SyncStorage) Delete(key storage.ObjectKey) error {
+- return ss.runAll(func(s storage.Storage) error {
+- return s.Delete(key)
+- })
+-}
+-
+-func (ss *SyncStorage) Close() error {
+- // Close all WatchStorages
+- for _, s := range ss.storages {
+- if watchStorage, ok := s.(watch.WatchStorage); ok {
+- _ = watchStorage.Close()
+- }
+- }
+-
+- // Close the event streams if set
+- if ss.inboundStream != nil {
+- close(ss.inboundStream)
+- }
+- if ss.outboundStream != nil {
+- close(ss.outboundStream)
+- }
+- // Wait for the monitor goroutine
+- ss.monitor.Wait()
+- return nil
+-}
+-
+-func (ss *SyncStorage) GetUpdateStream() update.UpdateStream {
+- return ss.outboundStream
+-}
+-
+-// runAll runs the given function for all Storages in parallel and aggregates all errors
+-func (ss *SyncStorage) runAll(f func(storage.Storage) error) (err error) {
+- type result struct {
+- int
+- error
+- }
+-
+- errC := make(chan result)
+- for i, s := range ss.storages {
+- go func(i int, s storage.Storage) {
+- errC <- result{i, f(s)}
+- }(i, s) // NOTE: This requires i and s as arguments, otherwise they will be evaluated for one Storage only
+- }
+-
+- for i := 0; i < len(ss.storages); i++ {
+- if result := <-errC; result.error != nil {
+- if err == nil {
+- err = fmt.Errorf("SyncStorage: Error in Storage %d: %v", result.int, result.error)
+- } else {
+- err = fmt.Errorf("%v\n%29s %d: %v", err, "and error in Storage", result.int, result.error)
+- }
+- }
+- }
+-
+- return
+-}
+-
+-func (ss *SyncStorage) monitorFunc() {
+- log.Debug("SyncStorage: Monitoring thread started")
+- defer log.Debug("SyncStorage: Monitoring thread stopped")
+-
+- // TODO: Support detecting changes done when the GitOps daemon isn't running
+-	// This is difficult to do though, as we don't know which state is the latest
+- // For now, only update the state on write when the daemon is running
+- for {
+- upd, ok := <-ss.inboundStream
+- if ok {
+- log.Debugf("SyncStorage: Received update %v %t", upd, ok)
+-
+- gvk := upd.PartialObject.GetObjectKind().GroupVersionKind()
+- uid := upd.PartialObject.GetUID()
+- key := storage.NewObjectKey(storage.NewKindKey(gvk), runtime.NewIdentifier(string(uid)))
+- log.Debugf("SyncStorage: Object has gvk=%q and uid=%q", gvk, uid)
+-
+- switch upd.Event {
+- case update.ObjectEventModify, update.ObjectEventCreate:
+- // First load the Object using the Storage given in the update,
+- // then set it using the client constructed above
+-
+- obj, err := upd.Storage.Get(key)
+- if err != nil {
+- log.Errorf("Failed to get Object with UID %q: %v", upd.PartialObject.GetUID(), err)
+- continue
+- }
+-
+- if err = ss.Set(obj); err != nil {
+- log.Errorf("Failed to set Object with UID %q: %v", upd.PartialObject.GetUID(), err)
+- continue
+- }
+- case update.ObjectEventDelete:
+- // For deletion we use the generated "fake" APIType object
+- if err := ss.Delete(key); err != nil {
+- log.Errorf("Failed to delete Object with UID %q: %v", upd.PartialObject.GetUID(), err)
+- continue
+- }
+- }
+-
+- // Send the update to the listeners unless the channel is full,
+- // in which case issue a warning. The channel can hold as many
+- // updates as updateBuffer specifies.
+- select {
+- case ss.outboundStream <- upd:
+- log.Debugf("SyncStorage: Sent update: %v", upd)
+- default:
+- log.Warn("SyncStorage: Failed to send update, channel full")
+- }
+- } else {
+- return
+- }
+- }
+-}
+-*/
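The commented-out runAll above is a classic fan-out: one goroutine per Storage, results collected over a channel, failures aggregated into a single error. The same shape as a runnable, self-contained sketch:

package main

import (
	"fmt"
	"strings"
)

func runAll(n int, f func(i int) error) error {
	type result struct {
		idx int
		err error
	}
	errC := make(chan result, n)
	for i := 0; i < n; i++ {
		// Pass i as an argument so each goroutine sees its own copy
		go func(i int) { errC <- result{i, f(i)} }(i)
	}
	var msgs []string
	for i := 0; i < n; i++ {
		if r := <-errC; r.err != nil {
			msgs = append(msgs, fmt.Sprintf("storage %d: %v", r.idx, r.err))
		}
	}
	if len(msgs) > 0 {
		return fmt.Errorf("runAll: %s", strings.Join(msgs, "; "))
	}
	return nil
}

func main() {
	err := runAll(3, func(i int) error {
		if i == 1 {
			return fmt.Errorf("disk full")
		}
		return nil
	})
	fmt.Println(err) // runAll: storage 1: disk full
}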
+diff --git a/pkg/storage/transaction/commit.go b/pkg/storage/transaction/commit.go
+deleted file mode 100644
+index 30e55ae..0000000
+--- a/pkg/storage/transaction/commit.go
++++ /dev/null
+@@ -1,79 +0,0 @@
+-package transaction
+-
+-import (
+- "fmt"
+-
+- "github.com/fluxcd/go-git-providers/validation"
+-)
+-
+-// CommitResult describes a result of a transaction.
+-type CommitResult interface {
+- // GetAuthorName describes the author's name (as per git config)
+- // +required
+- GetAuthorName() string
+- // GetAuthorEmail describes the author's email (as per git config)
+- // +required
+- GetAuthorEmail() string
+- // GetTitle describes the change concisely, so it can be used as a commit message or PR title.
+- // +required
+- GetTitle() string
+- // GetDescription contains optional extra information about the change.
+- // +optional
+- GetDescription() string
+-
+- // GetMessage returns GetTitle() followed by a newline and GetDescription(), if set.
+- GetMessage() string
+- // Validate validates that all required fields are set, and given data is valid.
+- Validate() error
+-}
+-
+-// GenericCommitResult implements CommitResult.
+-var _ CommitResult = &GenericCommitResult{}
+-
+-// GenericCommitResult implements CommitResult.
+-type GenericCommitResult struct {
+- // AuthorName describes the author's name (as per git config)
+- // +required
+- AuthorName string
+- // AuthorEmail describes the author's email (as per git config)
+- // +required
+- AuthorEmail string
+- // Title describes the change concisely, so it can be used as a commit message or PR title.
+- // +required
+- Title string
+- // Description contains optional extra information about the change.
+- // +optional
+- Description string
+-}
+-
+-func (r *GenericCommitResult) GetAuthorName() string {
+- return r.AuthorName
+-}
+-func (r *GenericCommitResult) GetAuthorEmail() string {
+- return r.AuthorEmail
+-}
+-func (r *GenericCommitResult) GetTitle() string {
+- return r.Title
+-}
+-func (r *GenericCommitResult) GetDescription() string {
+- return r.Description
+-}
+-func (r *GenericCommitResult) GetMessage() string {
+- if len(r.Description) == 0 {
+- return r.Title
+- }
+- return fmt.Sprintf("%s\n%s", r.Title, r.Description)
+-}
+-func (r *GenericCommitResult) Validate() error {
+- v := validation.New("GenericCommitResult")
+- if len(r.AuthorName) == 0 {
+- v.Required("AuthorName")
+- }
+- if len(r.AuthorEmail) == 0 {
+- v.Required("AuthorEmail")
+- }
+- if len(r.Title) == 0 {
+- v.Required("Title")
+- }
+- return v.Error()
+-}
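For reference, a short sketch of how the CommitResult API deleted above was used; this compiles only against the pre-removal tree, and the author details are illustrative:

package main

import (
	"fmt"

	"github.com/weaveworks/libgitops/pkg/storage/transaction"
)

func main() {
	res := &transaction.GenericCommitResult{
		AuthorName:  "Jane Doe",
		AuthorEmail: "jane@example.com",
		Title:       "Update Car speed limits",
		Description: "Raise the default speed limit to 120 km/h.",
	}
	// Validate checks that all three required fields are non-empty
	if err := res.Validate(); err != nil {
		panic(err)
	}
	// GetMessage joins the title and description with a newline
	fmt.Println(res.GetMessage())
}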
+diff --git a/pkg/storage/transaction/git.go b/pkg/storage/transaction/git.go
+deleted file mode 100644
+index efc57ab..0000000
+--- a/pkg/storage/transaction/git.go
++++ /dev/null
+@@ -1,161 +0,0 @@
+-package transaction
+-
+-import (
+- "context"
+- "fmt"
+- "strings"
+-
+- "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/gitdir"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/serializer"
+- "github.com/weaveworks/libgitops/pkg/storage"
+- "github.com/weaveworks/libgitops/pkg/util"
+- "github.com/weaveworks/libgitops/pkg/util/watcher"
+-)
+-
+-var excludeDirs = []string{".git"}
+-
+-func NewGitStorage(gitDir gitdir.GitDirectory, prProvider PullRequestProvider, ser serializer.Serializer) (TransactionStorage, error) {
+- // Make sure the repo is cloned. If this func has already been called, it will be a no-op.
+- if err := gitDir.StartCheckoutLoop(); err != nil {
+- return nil, err
+- }
+-
+- raw := storage.NewGenericMappedRawStorage(gitDir.Dir())
+- s := storage.NewGenericStorage(raw, ser, []runtime.IdentifierFactory{runtime.Metav1NameIdentifier})
+-
+- gitStorage := &GitStorage{
+- ReadStorage: s,
+- s: s,
+- raw: raw,
+- gitDir: gitDir,
+- prProvider: prProvider,
+- }
+- // Do a first sync now, and then start the background loop
+- if err := gitStorage.sync(); err != nil {
+- return nil, err
+- }
+- gitStorage.syncLoop()
+-
+- return gitStorage, nil
+-}
+-
+-type GitStorage struct {
+- storage.ReadStorage
+-
+- s storage.Storage
+- raw storage.MappedRawStorage
+- gitDir gitdir.GitDirectory
+- prProvider PullRequestProvider
+-}
+-
+-func (s *GitStorage) syncLoop() {
+- go func() {
+- for {
+- if commit, ok := <-s.gitDir.CommitChannel(); ok {
+- logrus.Debugf("GitStorage: Got info about commit %q, syncing...", commit)
+- if err := s.sync(); err != nil {
+- logrus.Errorf("GitStorage: Got sync error: %v", err)
+- }
+- }
+- }
+- }()
+-}
+-
+-func (s *GitStorage) sync() error {
+- mappings, err := computeMappings(s.gitDir.Dir(), s.s)
+- if err != nil {
+- return err
+- }
+- logrus.Debugf("Rewriting the mappings to %v", mappings)
+- s.raw.SetMappings(mappings)
+- return nil
+-}
+-
+-func (s *GitStorage) Transaction(ctx context.Context, streamName string, fn TransactionFunc) error {
+- // Append random bytes to the end of the stream name if it ends with a dash
+- if strings.HasSuffix(streamName, "-") {
+- suffix, err := util.RandomSHA(4)
+- if err != nil {
+- return err
+- }
+- streamName += suffix
+- }
+-
+- // Make sure we have the latest available state
+- if err := s.gitDir.Pull(ctx); err != nil {
+- return err
+- }
+- // Make sure no other Git ops can take place during the transaction, wait for other ongoing operations.
+- s.gitDir.Suspend()
+- defer s.gitDir.Resume()
+- // Always switch back to the main branch afterwards.
+-	// TODO: Fix the ordering of the defers, and return the deferred error
+- defer func() { _ = s.gitDir.CheckoutMainBranch() }()
+-
+- // Check out a new branch with the given name
+- if err := s.gitDir.CheckoutNewBranch(streamName); err != nil {
+- return err
+- }
+- // Invoke the transaction
+- result, err := fn(ctx, s.s)
+- if err != nil {
+- return err
+- }
+- // Make sure the result is valid
+- if err := result.Validate(); err != nil {
+- return fmt.Errorf("transaction result is not valid: %w", err)
+- }
+- // Perform the commit
+- if err := s.gitDir.Commit(ctx, result.GetAuthorName(), result.GetAuthorEmail(), result.GetMessage()); err != nil {
+- return err
+- }
+- // Return if no PR should be made
+- prResult, ok := result.(PullRequestResult)
+- if !ok {
+- return nil
+- }
+- // If a PR was asked for, and no provider was given, error out
+- if s.prProvider == nil {
+- return ErrNoPullRequestProvider
+- }
+- // Create the PR using the provider.
+- return s.prProvider.CreatePullRequest(ctx, &GenericPullRequestSpec{
+- PullRequestResult: prResult,
+- MainBranch: s.gitDir.MainBranch(),
+- MergeBranch: streamName,
+- RepositoryRef: s.gitDir.RepositoryRef(),
+- })
+-}
+-
+-func computeMappings(dir string, s storage.Storage) (map[storage.ObjectKey]string, error) {
+- validExts := make([]string, 0, len(storage.ContentTypes))
+- for ext := range storage.ContentTypes {
+- validExts = append(validExts, ext)
+- }
+-
+- files, err := watcher.WalkDirectoryForFiles(dir, validExts, excludeDirs)
+- if err != nil {
+- return nil, err
+- }
+-
+- // TODO: Compute the difference between the earlier state, and implement EventStorage so the user
+- // can automatically subscribe to changes of objects between versions.
+- m := map[storage.ObjectKey]string{}
+- for _, file := range files {
+- partObjs, err := storage.DecodePartialObjects(serializer.FromFile(file), s.Serializer().Scheme(), false, nil)
+- if err != nil {
+- logrus.Errorf("couldn't decode %q into a partial object: %v", file, err)
+- continue
+- }
+- key, err := s.ObjectKeyFor(partObjs[0])
+- if err != nil {
+- logrus.Errorf("couldn't get objectkey for partial object: %v", err)
+- continue
+- }
+- logrus.Debugf("Adding mapping between %s and %q", key, file)
+- m[key] = file
+- }
+- return m, nil
+-}
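For reference, a sketch of how GitStorage.Transaction was driven before this removal (pre-removal API; the branch name and author details are illustrative):

package sample

import (
	"context"

	"github.com/weaveworks/libgitops/pkg/storage"
	"github.com/weaveworks/libgitops/pkg/storage/transaction"
)

func updateSpeed(ts transaction.TransactionStorage) error {
	// The trailing dash makes Transaction append a random 8-char suffix
	return ts.Transaction(context.Background(), "update-speed-", func(ctx context.Context, s storage.Storage) (transaction.CommitResult, error) {
		// Mutate the desired state through s here (Get/Update/...).
		// Returning an error (e.g. transaction.ErrAbortTransaction) aborts the branch.
		return &transaction.GenericCommitResult{
			AuthorName:  "Jane Doe",
			AuthorEmail: "jane@example.com",
			Title:       "Bump speed limit",
		}, nil
	})
}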
+diff --git a/pkg/storage/transaction/pullrequest.go b/pkg/storage/transaction/pullrequest.go
+deleted file mode 100644
+index bf0fcf2..0000000
+--- a/pkg/storage/transaction/pullrequest.go
++++ /dev/null
+@@ -1,130 +0,0 @@
+-package transaction
+-
+-import (
+- "context"
+-
+- "github.com/fluxcd/go-git-providers/gitprovider"
+- "github.com/fluxcd/go-git-providers/validation"
+-)
+-
+-// PullRequestResult can be returned from a TransactionFunc instead of a CommitResult, if
+-// a PullRequest is desired to be created by the PullRequestProvider.
+-type PullRequestResult interface {
+- // PullRequestResult is a superset of CommitResult
+- CommitResult
+-
+- // GetLabels specifies what labels should be applied on the PR.
+- // +optional
+- GetLabels() []string
+- // GetAssignees specifies what user login names should be assigned to this PR.
+- // Note: Only users with "pull" access or more can be assigned.
+- // +optional
+- GetAssignees() []string
+- // GetMilestone specifies what milestone this should be attached to.
+- // +optional
+- GetMilestone() string
+-}
+-
+-// GenericPullRequestResult implements PullRequestResult.
+-var _ PullRequestResult = &GenericPullRequestResult{}
+-
+-// GenericPullRequestResult implements PullRequestResult.
+-type GenericPullRequestResult struct {
+- // GenericPullRequestResult is a superset of a CommitResult.
+- CommitResult
+-
+- // Labels specifies what labels should be applied on the PR.
+- // +optional
+- Labels []string
+- // Assignees specifies what user login names should be assigned to this PR.
+- // Note: Only users with "pull" access or more can be assigned.
+- // +optional
+- Assignees []string
+- // Milestone specifies what milestone this should be attached to.
+- // +optional
+- Milestone string
+-}
+-
+-func (r *GenericPullRequestResult) GetLabels() []string {
+- return r.Labels
+-}
+-func (r *GenericPullRequestResult) GetAssignees() []string {
+- return r.Assignees
+-}
+-func (r *GenericPullRequestResult) GetMilestone() string {
+- return r.Milestone
+-}
+-func (r *GenericPullRequestResult) Validate() error {
+- v := validation.New("GenericPullRequestResult")
+- // Just validate the "inner" object
+- v.Append(r.CommitResult.Validate(), r.CommitResult, "CommitResult")
+- return v.Error()
+-}
+-
+-// PullRequestSpec is the messaging interface between the TransactionStorage, and the
+-// PullRequestProvider. The PullRequestSpec contains all the needed information for creating
+-// a Pull Request successfully.
+-type PullRequestSpec interface {
+- // PullRequestSpec is a superset of PullRequestResult.
+- PullRequestResult
+-
+- // GetMainBranch returns the main branch of the repository.
+- // +required
+- GetMainBranch() string
+- // GetMergeBranch returns the branch that is pending to be merged into main with this PR.
+- // +required
+- GetMergeBranch() string
+-	// GetRepositoryRef returns a reference to the repository this PR should be created in.
+- // +required
+- GetRepositoryRef() gitprovider.RepositoryRef
+-}
+-
+-// GenericPullRequestSpec implements PullRequestSpec.
+-type GenericPullRequestSpec struct {
+- // GenericPullRequestSpec is a superset of PullRequestResult.
+- PullRequestResult
+-
+-	// MainBranch specifies the main branch of the repository.
+-	// +required
+-	MainBranch string
+-	// MergeBranch specifies the branch that is pending to be merged into main with this PR.
+-	// +required
+-	MergeBranch string
+-	// RepositoryRef specifies the repository this PR should be created in.
+-	// +required
+-	RepositoryRef gitprovider.RepositoryRef
+-}
+-
+-func (r *GenericPullRequestSpec) GetMainBranch() string {
+- return r.MainBranch
+-}
+-func (r *GenericPullRequestSpec) GetMergeBranch() string {
+- return r.MergeBranch
+-}
+-func (r *GenericPullRequestSpec) GetRepositoryRef() gitprovider.RepositoryRef {
+- return r.RepositoryRef
+-}
+-func (r *GenericPullRequestSpec) Validate() error {
+- v := validation.New("GenericPullRequestSpec")
+- // Just validate the "inner" object
+- v.Append(r.PullRequestResult.Validate(), r.PullRequestResult, "PullRequestResult")
+-
+- if len(r.MainBranch) == 0 {
+- v.Required("MainBranch")
+- }
+- if len(r.MergeBranch) == 0 {
+- v.Required("MergeBranch")
+- }
+- if r.RepositoryRef == nil {
+- v.Required("RepositoryRef")
+- }
+- return v.Error()
+-}
+-
+-// PullRequestProvider is an interface for providers that can create so-called "Pull Requests",
+-// as popularized by Git. A Pull Request is a formal ask for a branch to be merged into the main one.
+-// It can be UI-based, as in GitHub and GitLab, or it can be using some other method.
+-type PullRequestProvider interface {
+- // CreatePullRequest creates a Pull Request using the given specification.
+- CreatePullRequest(ctx context.Context, spec PullRequestSpec) error
+-}
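Returning a PullRequestResult instead of a plain CommitResult from a transaction is what made GitStorage above open a PR. A pre-removal sketch (labels, assignees and milestone values are illustrative):

package sample

import "github.com/weaveworks/libgitops/pkg/storage/transaction"

func prResult() transaction.PullRequestResult {
	return &transaction.GenericPullRequestResult{
		// The embedded CommitResult carries the required commit fields
		CommitResult: &transaction.GenericCommitResult{
			AuthorName:  "Jane Doe",
			AuthorEmail: "jane@example.com",
			Title:       "Bump speed limit",
		},
		Labels:    []string{"kind/enhancement"},
		Assignees: []string{"janedoe"},
		Milestone: "v1.0",
	}
}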
+diff --git a/pkg/storage/transaction/pullrequest/github/github.go b/pkg/storage/transaction/pullrequest/github/github.go
+deleted file mode 100644
+index d8efbd6..0000000
+--- a/pkg/storage/transaction/pullrequest/github/github.go
++++ /dev/null
+@@ -1,119 +0,0 @@
+-package github
+-
+-import (
+- "context"
+- "errors"
+- "fmt"
+-
+- "github.com/fluxcd/go-git-providers/github"
+- "github.com/fluxcd/go-git-providers/gitprovider"
+- gogithub "github.com/google/go-github/v32/github"
+- "github.com/weaveworks/libgitops/pkg/storage/transaction"
+-)
+-
+-// TODO: This package should really only depend on go-git-providers' abstraction interface
+-
+-var ErrProviderNotSupported = errors.New("only the Github go-git-providers provider is supported at the moment")
+-
+-// NewGitHubPRProvider returns a new transaction.PullRequestProvider from a gitprovider.Client.
+-func NewGitHubPRProvider(c gitprovider.Client) (transaction.PullRequestProvider, error) {
+- // Make sure a Github client was passed
+- if c.ProviderID() != github.ProviderID {
+- return nil, ErrProviderNotSupported
+- }
+- return &prCreator{c}, nil
+-}
+-
+-type prCreator struct {
+- c gitprovider.Client
+-}
+-
+-func (c *prCreator) CreatePullRequest(ctx context.Context, spec transaction.PullRequestSpec) error {
+- // First, validate the input
+- if err := spec.Validate(); err != nil {
+- return fmt.Errorf("given PullRequestSpec wasn't valid")
+- }
+-
+- // Use the "raw" go-github client to do this
+- ghClient := c.c.Raw().(*gogithub.Client)
+-
+- // Helper variables
+- owner := spec.GetRepositoryRef().GetIdentity()
+- repo := spec.GetRepositoryRef().GetRepository()
+- var body *string
+- if spec.GetDescription() != "" {
+- body = gogithub.String(spec.GetDescription())
+- }
+-
+- // Create the Pull Request
+- pr, _, err := ghClient.PullRequests.Create(ctx, owner, repo, &gogithub.NewPullRequest{
+- Head: gogithub.String(spec.GetMergeBranch()),
+- Base: gogithub.String(spec.GetMainBranch()),
+- Title: gogithub.String(spec.GetTitle()),
+- Body: body,
+- })
+- if err != nil {
+- return err
+- }
+-
+- // If spec.GetMilestone() is set, fetch the ID of the milestone
+- // Only set milestoneID to non-nil if specified
+- var milestoneID *int
+- if len(spec.GetMilestone()) != 0 {
+- milestoneID, err = getMilestoneID(ctx, ghClient, owner, repo, spec.GetMilestone())
+- if err != nil {
+- return err
+- }
+- }
+-
+- // Only set assignees to non-nil if specified
+- var assignees *[]string
+- if a := spec.GetAssignees(); len(a) != 0 {
+- assignees = &a
+- }
+-
+- // Only set labels to non-nil if specified
+- var labels *[]string
+- if l := spec.GetLabels(); len(l) != 0 {
+- labels = &l
+- }
+-
+- // Only PATCH the PR if any of the fields were set
+- if milestoneID != nil || assignees != nil || labels != nil {
+- _, _, err := ghClient.Issues.Edit(ctx, owner, repo, pr.GetNumber(), &gogithub.IssueRequest{
+- Milestone: milestoneID,
+- Assignees: assignees,
+- Labels: labels,
+- })
+- if err != nil {
+- return err
+- }
+- }
+-
+- return nil
+-}
+-
+-func getMilestoneID(ctx context.Context, c *gogithub.Client, owner, repo, milestoneName string) (*int, error) {
+- // List all milestones in the repo
+- // TODO: This could/should use pagination
+- milestones, _, err := c.Issues.ListMilestones(ctx, owner, repo, &gogithub.MilestoneListOptions{
+- State: "all",
+- })
+- if err != nil {
+- return nil, err
+- }
+- // Loop through all milestones, search for one with the right name
+- for _, milestone := range milestones {
+- // Only consider a milestone with the right name
+- if milestone.GetTitle() != milestoneName {
+- continue
+- }
+- // Validate nil to avoid panics
+- if milestone.Number == nil {
+- return nil, fmt.Errorf("didn't expect milestone Number to be nil: %v", milestone)
+- }
+- // Return the Milestone number
+- return milestone.Number, nil
+- }
+- return nil, fmt.Errorf("couldn't find milestone with name: %s", milestoneName)
+-}
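The TODO in getMilestoneID above notes that ListMilestones should paginate. A sketch of the standard go-github pagination loop that would address it (getMilestoneIDPaginated is a hypothetical replacement, not part of this diff):

package sample

import (
	"context"
	"fmt"

	gogithub "github.com/google/go-github/v32/github"
)

func getMilestoneIDPaginated(ctx context.Context, c *gogithub.Client, owner, repo, milestoneName string) (*int, error) {
	opts := &gogithub.MilestoneListOptions{
		State:       "all",
		ListOptions: gogithub.ListOptions{PerPage: 100},
	}
	for {
		milestones, resp, err := c.Issues.ListMilestones(ctx, owner, repo, opts)
		if err != nil {
			return nil, err
		}
		for _, milestone := range milestones {
			if milestone.GetTitle() == milestoneName && milestone.Number != nil {
				return milestone.Number, nil
			}
		}
		// NextPage is zero once the last page has been fetched
		if resp.NextPage == 0 {
			break
		}
		opts.Page = resp.NextPage
	}
	return nil, fmt.Errorf("couldn't find milestone with name: %s", milestoneName)
}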
+diff --git a/pkg/storage/transaction/storage.go b/pkg/storage/transaction/storage.go
+deleted file mode 100644
+index 8a60e93..0000000
+--- a/pkg/storage/transaction/storage.go
++++ /dev/null
+@@ -1,28 +0,0 @@
+-package transaction
+-
+-import (
+- "context"
+- "errors"
+-
+- "github.com/weaveworks/libgitops/pkg/storage"
+-)
+-
+-var (
+- ErrAbortTransaction = errors.New("transaction aborted")
+- ErrTransactionActive = errors.New("transaction is active")
+- ErrNoPullRequestProvider = errors.New("no pull request provider given")
+-)
+-
+-type TransactionFunc func(ctx context.Context, s storage.Storage) (CommitResult, error)
+-
+-type TransactionStorage interface {
+- storage.ReadStorage
+-
+- // Transaction creates a new "stream" (for Git: branch) with the given name, or
+- // prefix if streamName ends with a dash (in that case, a 8-char hash will be appended).
+- // The environment is made sure to be as up-to-date as possible before fn executes. When
+- // fn executes, the given storage can be used to modify the desired state. If you want to
+- // "commit" the changes made in fn, just return nil. If you want to abort, return ErrAbortTransaction.
+-	// If you additionally want a Pull Request to be opened for the change, return a CommitResult that also implements PullRequestResult.
+- Transaction(ctx context.Context, streamName string, fn TransactionFunc) error
+-}
+diff --git a/pkg/storage/utils.go b/pkg/storage/utils.go
+new file mode 100644
+index 0000000..d45323b
+--- /dev/null
++++ b/pkg/storage/utils.go
+@@ -0,0 +1,23 @@
++package storage
++
++import (
++ "fmt"
++
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++)
++
++// VerifyNamespaced verifies that the given namespace parameter is consistent with
++// the namespacing rule the Namespacer reports for the given GroupKind.
++func VerifyNamespaced(namespacer core.Namespacer, gk core.GroupKind, ns string) error {
++ // Get namespacing info
++ namespaced, err := namespacer.IsNamespaced(gk)
++ if err != nil {
++ return err
++ }
++ if namespaced && ns == "" {
++ return fmt.Errorf("%w: namespaced kind %v requires non-empty namespace", ErrNamespacedMismatch, gk)
++ } else if !namespaced && ns != "" {
++ return fmt.Errorf("%w: non-namespaced kind %v must not have namespace parameter set", ErrNamespacedMismatch, gk)
++ }
++ return nil
++}
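A self-contained illustration of the rule VerifyNamespaced enforces; the mapNamespacer stub and the group name below are illustrative, while the real Namespacer interface lives in pkg/storage/core:

package main

import "fmt"

type groupKind struct{ Group, Kind string }

// mapNamespacer reports namespacing from a fixed table.
type mapNamespacer map[groupKind]bool

func (m mapNamespacer) IsNamespaced(gk groupKind) (bool, error) {
	namespaced, ok := m[gk]
	if !ok {
		return false, fmt.Errorf("unknown GroupKind: %v", gk)
	}
	return namespaced, nil
}

// verifyNamespaced mirrors VerifyNamespaced above: namespaced kinds need a
// namespace, non-namespaced kinds must not have one.
func verifyNamespaced(n mapNamespacer, gk groupKind, ns string) error {
	namespaced, err := n.IsNamespaced(gk)
	if err != nil {
		return err
	}
	if namespaced && ns == "" {
		return fmt.Errorf("namespaced kind %v requires non-empty namespace", gk)
	}
	if !namespaced && ns != "" {
		return fmt.Errorf("non-namespaced kind %v must not have namespace parameter set", gk)
	}
	return nil
}

func main() {
	gk := groupKind{"sample.weave.works", "Car"}
	n := mapNamespacer{gk: true}
	fmt.Println(verifyNamespaced(n, gk, ""))        // error: requires non-empty namespace
	fmt.Println(verifyNamespaced(n, gk, "default")) // <nil>
}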
+diff --git a/pkg/storage/watch/storage.go b/pkg/storage/watch/storage.go
+deleted file mode 100644
+index f3d7b0b..0000000
+--- a/pkg/storage/watch/storage.go
++++ /dev/null
+@@ -1,244 +0,0 @@
+-package watch
+-
+-import (
+- "io/ioutil"
+-
+- log "github.com/sirupsen/logrus"
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/serializer"
+- "github.com/weaveworks/libgitops/pkg/storage"
+- "github.com/weaveworks/libgitops/pkg/storage/watch/update"
+- "github.com/weaveworks/libgitops/pkg/util/sync"
+- "github.com/weaveworks/libgitops/pkg/util/watcher"
+- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+- "k8s.io/apimachinery/pkg/types"
+-)
+-
+-// NewManifestStorage returns a pre-configured GenericWatchStorage backed by a storage.GenericStorage,
+-// and a GenericMappedRawStorage for the given manifestDir and Serializer. This should be sufficient
+-// for most users who want to watch changes in a directory with manifests.
+-func NewManifestStorage(manifestDir string, ser serializer.Serializer) (update.EventStorage, error) {
+- return NewGenericWatchStorage(
+- storage.NewGenericStorage(
+- storage.NewGenericMappedRawStorage(manifestDir),
+- ser,
+- []runtime.IdentifierFactory{runtime.Metav1NameIdentifier},
+- ),
+- )
+-}
+-
+-// NewGenericWatchStorage is an extended Storage implementation, which provides a watcher
+-// for watching changes in the directory managed by the embedded Storage's RawStorage.
+-// If the RawStorage is a MappedRawStorage instance, its mappings will automatically
+-// be updated by the WatchStorage. Update events are sent to the given event stream.
+-// Note: This WatchStorage only works for one-frame files (i.e. only one YAML document
+-// per file is supported).
+-func NewGenericWatchStorage(s storage.Storage) (update.EventStorage, error) {
+- ws := &GenericWatchStorage{
+- Storage: s,
+- }
+-
+- var err error
+- var files []string
+- if ws.watcher, files, err = watcher.NewFileWatcher(s.RawStorage().WatchDir()); err != nil {
+- return nil, err
+- }
+-
+- ws.monitor = sync.RunMonitor(func() {
+- ws.monitorFunc(ws.RawStorage(), files) // Offload the file registration to the goroutine
+- })
+-
+- return ws, nil
+-}
+-
+-// EventDeleteObjectName is used as the name of an object sent to the
+-// GenericWatchStorage's event stream when the object has been deleted
+-const EventDeleteObjectName = ""
+-
+-// GenericWatchStorage implements the WatchStorage interface
+-type GenericWatchStorage struct {
+- storage.Storage
+- watcher *watcher.FileWatcher
+- events update.UpdateStream
+- monitor *sync.Monitor
+-}
+-
+-var _ update.EventStorage = &GenericWatchStorage{}
+-
+-// Suspend modify events during Create
+-func (s *GenericWatchStorage) Create(obj runtime.Object) error {
+- s.watcher.Suspend(watcher.FileEventModify)
+- return s.Storage.Create(obj)
+-}
+-
+-// Suspend modify events during Update
+-func (s *GenericWatchStorage) Update(obj runtime.Object) error {
+- s.watcher.Suspend(watcher.FileEventModify)
+- return s.Storage.Update(obj)
+-}
+-
+-// Suspend modify events during Patch
+-func (s *GenericWatchStorage) Patch(key storage.ObjectKey, patch []byte) error {
+- s.watcher.Suspend(watcher.FileEventModify)
+- return s.Storage.Patch(key, patch)
+-}
+-
+-// Suspend delete events during Delete
+-func (s *GenericWatchStorage) Delete(key storage.ObjectKey) error {
+- s.watcher.Suspend(watcher.FileEventDelete)
+- return s.Storage.Delete(key)
+-}
+-
+-func (s *GenericWatchStorage) SetUpdateStream(eventStream update.UpdateStream) {
+- s.events = eventStream
+-}
+-
+-func (s *GenericWatchStorage) Close() error {
+- s.watcher.Close()
+- s.monitor.Wait()
+- return nil
+-}
+-
+-func (s *GenericWatchStorage) monitorFunc(raw storage.RawStorage, files []string) {
+- log.Debug("GenericWatchStorage: Monitoring thread started")
+- defer log.Debug("GenericWatchStorage: Monitoring thread stopped")
+- var content []byte
+-
+- // Send a MODIFY event for all files (and fill the mappings
+- // of the MappedRawStorage) before starting to monitor changes
+- for _, file := range files {
+- content, err := ioutil.ReadFile(file)
+- if err != nil {
+- log.Warnf("Ignoring %q: %v", file, err)
+- continue
+- }
+-
+- obj, err := runtime.NewPartialObject(content)
+- if err != nil {
+- log.Warnf("Ignoring %q: %v", file, err)
+- continue
+- }
+-
+- // Add a mapping between this object and path
+- s.addMapping(raw, obj, file)
+- // Send the event to the events channel
+- s.sendEvent(update.ObjectEventModify, obj)
+- }
+-
+- for {
+- if event, ok := <-s.watcher.GetFileUpdateStream(); ok {
+- var partObj runtime.PartialObject
+- var err error
+-
+- var objectEvent update.ObjectEvent
+- switch event.Event {
+- case watcher.FileEventModify:
+- objectEvent = update.ObjectEventModify
+- case watcher.FileEventDelete:
+- objectEvent = update.ObjectEventDelete
+- }
+-
+- log.Tracef("GenericWatchStorage: Processing event: %s", event.Event)
+- if event.Event == watcher.FileEventDelete {
+- key, err := raw.GetKey(event.Path)
+- if err != nil {
+- log.Warnf("Failed to retrieve data for %q: %v", event.Path, err)
+- continue
+- }
+-
+- // This creates a "fake" Object from the key to be used for
+- // deletion, as the original has already been removed from disk
+- apiVersion, kind := key.GetGVK().ToAPIVersionAndKind()
+- partObj = &runtime.PartialObjectImpl{
+- TypeMeta: metav1.TypeMeta{
+- APIVersion: apiVersion,
+- Kind: kind,
+- },
+- ObjectMeta: metav1.ObjectMeta{
+- Name: EventDeleteObjectName,
+-					// TODO: This doesn't take into account cases where e.g. the identifier is "{namespace}/{name}"
+- UID: types.UID(key.GetIdentifier()),
+- },
+- }
+- // remove the mapping for this key as it's now deleted
+- s.removeMapping(raw, key)
+- } else {
+- content, err = ioutil.ReadFile(event.Path)
+- if err != nil {
+- log.Warnf("Ignoring %q: %v", event.Path, err)
+- continue
+- }
+-
+- if partObj, err = runtime.NewPartialObject(content); err != nil {
+- log.Warnf("Ignoring %q: %v", event.Path, err)
+- continue
+- }
+-
+- if event.Event == watcher.FileEventMove {
+- // Update the mappings for the moved file (AddMapping overwrites)
+- s.addMapping(raw, partObj, event.Path)
+-
+- // Internal move events are a no-op
+- continue
+- }
+-
+-				// This is based on the key's existence instead of watcher.FileEventCreate,
+- // as Objects can get updated (via watcher.FileEventModify) to be conformant
+- if _, err = raw.GetKey(event.Path); err != nil {
+- // Add a mapping between this object and path
+- s.addMapping(raw, partObj, event.Path)
+-
+- // This is what actually determines if an Object is created,
+- // so update the event to update.ObjectEventCreate here
+- objectEvent = update.ObjectEventCreate
+- }
+- }
+-
+- // Send the objectEvent to the events channel
+- if objectEvent != update.ObjectEventNone {
+- s.sendEvent(objectEvent, partObj)
+- }
+- } else {
+- return
+- }
+- }
+-}
+-
+-func (s *GenericWatchStorage) sendEvent(event update.ObjectEvent, partObj runtime.PartialObject) {
+- if s.events != nil {
+- log.Tracef("GenericWatchStorage: Sending event: %v", event)
+- s.events <- update.Update{
+- Event: event,
+- PartialObject: partObj,
+- Storage: s,
+- }
+- }
+-}
+-
+-// addMapping registers a mapping between the given object and the specified path, if raw is a
+-// MappedRawStorage. If a given mapping already exists between this object and some path, it
+-// will be overridden with the specified new path
+-func (s *GenericWatchStorage) addMapping(raw storage.RawStorage, obj runtime.Object, file string) {
+- mapped, ok := raw.(storage.MappedRawStorage)
+- if !ok {
+- return
+- }
+-
+-	// Let the embedded storage decide, using its identifiers, how to identify the object
+-	key, err := s.Storage.ObjectKeyFor(obj)
+-	if err != nil {
+-		log.Errorf("couldn't get object key for: gvk=%s, uid=%s, name=%s", obj.GetObjectKind().GroupVersionKind(), obj.GetUID(), obj.GetName())
+-		// Don't register a mapping for an object that couldn't be identified
+-		return
+-	}
+-
+- mapped.AddMapping(key, file)
+-}
+-
+-// removeMapping removes the mapping for a file that no longer exists
+-func (s *GenericWatchStorage) removeMapping(raw storage.RawStorage, key storage.ObjectKey) {
+- mapped, ok := raw.(storage.MappedRawStorage)
+- if !ok {
+- return
+- }
+-
+- mapped.RemoveMapping(key)
+-}
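For reference, a pre-removal sketch of wiring up the manifest watcher deleted above and draining its update stream (buffer size and logging are illustrative):

package sample

import (
	"fmt"

	"github.com/weaveworks/libgitops/pkg/serializer"
	"github.com/weaveworks/libgitops/pkg/storage/watch"
	"github.com/weaveworks/libgitops/pkg/storage/watch/update"
)

func watchManifests(dir string, ser serializer.Serializer) error {
	ws, err := watch.NewManifestStorage(dir, ser)
	if err != nil {
		return err
	}
	defer func() { _ = ws.Close() }()

	// A large buffer keeps the watcher from blocking on a slow consumer
	updates := make(update.UpdateStream, 4096)
	ws.SetUpdateStream(updates)

	for upd := range updates {
		fmt.Printf("%s: %s uid=%s\n", upd.Event,
			upd.PartialObject.GetObjectKind().GroupVersionKind(), upd.PartialObject.GetUID())
	}
	return nil
}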
+diff --git a/pkg/storage/watch/update/event.go b/pkg/storage/watch/update/event.go
+deleted file mode 100644
+index 57367b7..0000000
+--- a/pkg/storage/watch/update/event.go
++++ /dev/null
+@@ -1,31 +0,0 @@
+-package update
+-
+-import "fmt"
+-
+-// ObjectEvent is an enum describing a change in an Object's state.
+-type ObjectEvent byte
+-
+-var _ fmt.Stringer = ObjectEvent(0)
+-
+-const (
+- ObjectEventNone ObjectEvent = iota // 0
+- ObjectEventCreate // 1
+- ObjectEventModify // 2
+- ObjectEventDelete // 3
+-)
+-
+-func (o ObjectEvent) String() string {
+- switch o {
+-	case ObjectEventNone:
+-		return "NONE"
+-	case ObjectEventCreate:
+-		return "CREATE"
+-	case ObjectEventModify:
+-		return "MODIFY"
+-	case ObjectEventDelete:
+-		return "DELETE"
+- }
+-
+- // Should never happen
+- return "UNKNOWN"
+-}
+diff --git a/pkg/storage/watch/update/update.go b/pkg/storage/watch/update/update.go
+deleted file mode 100644
+index 05ea7e0..0000000
+--- a/pkg/storage/watch/update/update.go
++++ /dev/null
+@@ -1,28 +0,0 @@
+-package update
+-
+-import (
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/storage"
+-)
+-
+-// Update bundles an ObjectEvent with the PartialObject it
+-// concerns, and the Storage it can be retrieved from.
+-type Update struct {
+- Event ObjectEvent
+- PartialObject runtime.PartialObject
+- Storage storage.Storage
+-}
+-
+-// UpdateStream is a channel of updates.
+-type UpdateStream chan Update
+-
+-// EventStorage is a storage that exposes an UpdateStream.
+-type EventStorage interface {
+- storage.Storage
+-
+- // SetUpdateStream gives the EventStorage a channel to send events to.
+- // The caller is responsible for choosing a large enough buffer to avoid
+- // blocking the underlying EventStorage implementation unnecessarily.
+- // TODO: In the future maybe enable sending events to multiple listeners?
+- SetUpdateStream(UpdateStream)
+-}
+diff --git a/pkg/util/fs.go b/pkg/util/fs.go
+deleted file mode 100644
+index 3e1f7d4..0000000
+--- a/pkg/util/fs.go
++++ /dev/null
+@@ -1,23 +0,0 @@
+-package util
+-
+-import (
+- "os"
+-)
+-
+-func PathExists(path string) (bool, os.FileInfo) {
+-	info, err := os.Stat(path)
+-	if err != nil {
+-		// Treat any stat error (not only "not exists") as non-existence, so we
+-		// never return true together with a nil os.FileInfo
+-		return false, nil
+-	}
+-
+-	return true, info
+-}
+-
+-func FileExists(filename string) bool {
+- exists, info := PathExists(filename)
+- if !exists {
+- return false
+- }
+-
+- return !info.IsDir()
+-}
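The deleted helpers above can be expressed with just the standard library; a minimal sketch using errors.Is (io/fs requires Go 1.16+), which also surfaces stat errors instead of swallowing them:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// fileExists reports whether path exists and is a regular file, not a directory.
func fileExists(path string) (bool, error) {
	info, err := os.Stat(path)
	if errors.Is(err, fs.ErrNotExist) {
		return false, nil
	}
	if err != nil {
		return false, err // e.g. permission errors are reported, not hidden
	}
	return !info.IsDir(), nil
}

func main() {
	ok, err := fileExists("/etc/hosts")
	fmt.Println(ok, err)
}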
+diff --git a/pkg/util/patch/patch.go b/pkg/util/patch/patch.go
+index 11c29ea..535be55 100644
+--- a/pkg/util/patch/patch.go
++++ b/pkg/util/patch/patch.go
+@@ -1,103 +1,88 @@
+ package patch
+
+ import (
+- "bytes"
++ "encoding/json"
+ "fmt"
+- "io/ioutil"
+
+- "github.com/weaveworks/libgitops/pkg/runtime"
+- "github.com/weaveworks/libgitops/pkg/serializer"
+- "k8s.io/apimachinery/pkg/runtime/schema"
++ jsonbytepatcher "github.com/evanphx/json-patch"
++ "k8s.io/apimachinery/pkg/api/errors"
++ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/strategicpatch"
+ )
+
+-type Patcher interface {
+- Create(new runtime.Object, applyFn func(runtime.Object) error) ([]byte, error)
+- Apply(original, patch []byte, gvk schema.GroupVersionKind) ([]byte, error)
+- ApplyOnFile(filePath string, patch []byte, gvk schema.GroupVersionKind) error
++// BytePatcherForType returns the right BytePatcher for the given
++// patch type.
++//
++// Note: if patchType is unknown, the return value will be nil, so make
++// sure you check the BytePatcher is non-nil before using it!
++func BytePatcherForType(patchType types.PatchType) BytePatcher {
++ switch patchType {
++ case types.JSONPatchType:
++ return JSONBytePatcher{}
++ case types.MergePatchType:
++ return MergeBytePatcher{}
++ case types.StrategicMergePatchType:
++ return StrategicMergeBytePatcher{}
++ default:
++ return nil
++ }
+ }
+
+-func NewPatcher(s serializer.Serializer) Patcher {
+- return &patcher{serializer: s}
+-}
++// Maximum number of operations a single JSON patch may contain.
++const maxJSONBytePatcherOperations = 10000
+
+-type patcher struct {
+- serializer serializer.Serializer
++type BytePatcher interface {
++	// TODO: SupportedType() types.PatchType
++	// Apply applies patchJSON to currentJSON and returns the result.
++	// Both byte slices must be versioned JSON of the same GVK; the schema
++	// argument is only used by the strategic merge patch implementation.
++	Apply(currentJSON, patchJSON []byte, schema strategicpatch.LookupPatchMeta) ([]byte, error)
+ }
+
+-// Create is a helper that creates a patch out of the change made in applyFn
+-func (p *patcher) Create(new runtime.Object, applyFn func(runtime.Object) error) (patchBytes []byte, err error) {
+- var oldBytes, newBytes bytes.Buffer
+- encoder := p.serializer.Encoder()
+- old := new.DeepCopyObject().(runtime.Object)
+-
+- if err = encoder.Encode(serializer.NewJSONFrameWriter(&oldBytes), old); err != nil {
+- return
+- }
+-
+- if err = applyFn(new); err != nil {
+- return
+- }
+-
+- if err = encoder.Encode(serializer.NewJSONFrameWriter(&newBytes), new); err != nil {
+- return
++type JSONBytePatcher struct{}
++
++func (JSONBytePatcher) Apply(currentJSON, patchJSON []byte, _ strategicpatch.LookupPatchMeta) ([]byte, error) {
++ // sanity check potentially abusive patches
++ // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
++ // TODO(luxas): Go v1.15 has the above mentioned patch, what needs changing now?
++ if len(patchJSON) > 1024*1024 {
++ v := []interface{}{}
++ if err := json.Unmarshal(patchJSON, &v); err != nil {
++ return nil, fmt.Errorf("error decoding patch: %v", err)
++ }
+ }
+
+- emptyObj, err := p.serializer.Scheme().New(old.GetObjectKind().GroupVersionKind())
+- if err != nil {
+- return
+- }
+-
+- patchBytes, err = strategicpatch.CreateTwoWayMergePatch(oldBytes.Bytes(), newBytes.Bytes(), emptyObj)
+- if err != nil {
+- return nil, fmt.Errorf("CreateTwoWayMergePatch failed: %v", err)
+- }
+-
+- return patchBytes, nil
+-}
+-
+-func (p *patcher) Apply(original, patch []byte, gvk schema.GroupVersionKind) ([]byte, error) {
+- emptyObj, err := p.serializer.Scheme().New(gvk)
++ patchObj, err := jsonbytepatcher.DecodePatch(patchJSON)
+ if err != nil {
+ return nil, err
+ }
+-
+- b, err := strategicpatch.StrategicMergePatch(original, patch, emptyObj)
+- if err != nil {
+- return nil, err
++ if len(patchObj) > maxJSONBytePatcherOperations {
++ return nil, errors.NewRequestEntityTooLargeError(
++ fmt.Sprintf("The maximum allowed number of operations in a JSON patch is %d, got %d",
++ maxJSONBytePatcherOperations, len(patchObj)))
+ }
+-
+- return p.serializerEncode(b)
++ return patchObj.Apply(currentJSON)
+ }
+
+-func (p *patcher) ApplyOnFile(filePath string, patch []byte, gvk schema.GroupVersionKind) error {
+- oldContent, err := ioutil.ReadFile(filePath)
+- if err != nil {
+- return err
+- }
+-
+- newContent, err := p.Apply(oldContent, patch, gvk)
+- if err != nil {
+- return err
++type MergeBytePatcher struct{}
++
++func (MergeBytePatcher) Apply(currentJSON, patchJSON []byte, _ strategicpatch.LookupPatchMeta) ([]byte, error) {
++ // sanity check potentially abusive patches
++ // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
++ // TODO(luxas): Go v1.15 has the above mentioned patch, what needs changing now?
++ if len(patchJSON) > 1024*1024 {
++ v := map[string]interface{}{}
++ if err := json.Unmarshal(patchJSON, &v); err != nil {
++ return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
++ }
+ }
+
+- return ioutil.WriteFile(filePath, newContent, 0644)
++ return jsonbytepatcher.MergePatch(currentJSON, patchJSON)
+ }
+
+-// StrategicMergePatch returns an unindented, unorganized JSON byte slice,
+-// this helper takes that as an input and returns the same JSON re-encoded
+-// with the serializer so it conforms to a runtime.Object
+-// TODO: Just use encoding/json.Indent here instead?
+-func (p *patcher) serializerEncode(input []byte) ([]byte, error) {
+- obj, err := p.serializer.Decoder().Decode(serializer.NewJSONFrameReader(serializer.FromBytes(input)))
+- if err != nil {
+- return nil, err
+- }
+-
+- var result bytes.Buffer
+- if err := p.serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&result), obj); err != nil {
+- return nil, err
+- }
++type StrategicMergeBytePatcher struct{}
+
+- return result.Bytes(), err
++func (StrategicMergeBytePatcher) Apply(currentJSON, patchJSON []byte, schema strategicpatch.LookupPatchMeta) ([]byte, error) {
++ // TODO: Also check for overflow here?
++ // TODO: What to do when schema is nil? error?
++ return strategicpatch.StrategicMergePatchUsingLookupPatchMeta(currentJSON, patchJSON, schema)
+ }
+diff --git a/pkg/util/patch/patch_test.go b/pkg/util/patch/patch_test.go
+index 9a3cf54..c9d1b01 100644
+--- a/pkg/util/patch/patch_test.go
++++ b/pkg/util/patch/patch_test.go
+@@ -1,5 +1,9 @@
+ package patch
+
++/*
++
++TODO: Create good unit tests for this package!
++
+ import (
+ "bytes"
+ "testing"
+@@ -58,3 +62,4 @@ func TestApplyPatch(t *testing.T) {
+ t.Fatal(err)
+ }
+ }
++*/
+diff --git a/pkg/util/sync/monitor.go b/pkg/util/sync/monitor.go
+index f09c55c..111a294 100644
+--- a/pkg/util/sync/monitor.go
++++ b/pkg/util/sync/monitor.go
+@@ -1,31 +1,39 @@
+ package sync
+
+-import "sync"
++import (
++ "errors"
++ "sync"
++)
+
+ // Monitor is a convenience wrapper around
+ // starting a goroutine with a wait group,
+ // which can be used to wait for the
+ // goroutine to stop.
+ type Monitor struct {
+- wg *sync.WaitGroup
++ wg *sync.WaitGroup
++ err error
+ }
+
+-func RunMonitor(f func()) (m *Monitor) {
+- m = &Monitor{
++func RunMonitor(f func() error) *Monitor {
++ m := &Monitor{
+ wg: new(sync.WaitGroup),
+ }
+
+ m.wg.Add(1)
+ go func() {
+- f()
++ m.err = f()
+ m.wg.Done()
+ }()
+
+- return
++ return m
+ }
+
+-func (m *Monitor) Wait() {
+- if m != nil {
+- m.wg.Wait()
++func (m *Monitor) Wait() error {
++ // TODO: Do we need this check?
++ if m == nil {
++ return errors.New("Monitor: invalid null pointer to m")
+ }
++ // TODO: maybe this could be easier implemented using just a channel?
++ m.wg.Wait()
++ return m.err
+ }
+diff --git a/pkg/util/util.go b/pkg/util/util.go
+deleted file mode 100644
+index c80159c..0000000
+--- a/pkg/util/util.go
++++ /dev/null
+@@ -1,54 +0,0 @@
+-package util
+-
+-import (
+- "bytes"
+- "crypto/rand"
+- "encoding/hex"
+- "fmt"
+- "os/exec"
+- "strings"
+-)
+-
+-func ExecuteCommand(command string, args ...string) (string, error) {
+- cmd := exec.Command(command, args...)
+- out, err := cmd.CombinedOutput()
+- if err != nil {
+- return "", fmt.Errorf("command %q exited with %q: %v", cmd.Args, out, err)
+- }
+-
+- return string(bytes.TrimSpace(out)), nil
+-}
+-
+-func MatchPrefix(prefix string, fields ...string) ([]string, bool) {
+- var prefixMatches, exactMatches []string
+-
+- for _, str := range fields {
+- if str == prefix {
+- exactMatches = append(exactMatches, str)
+- } else if strings.HasPrefix(str, prefix) {
+- prefixMatches = append(prefixMatches, str)
+- }
+- }
+-
+- // If we have exact matches, return them
+- // and set the exact match boolean
+- if len(exactMatches) > 0 {
+- return exactMatches, true
+- }
+-
+- return prefixMatches, false
+-}
+-
+-func BoolPtr(b bool) *bool {
+- return &b
+-}
+-
+-// RandomSHA returns a hex-encoded string from {byteLen} random bytes.
+-func RandomSHA(byteLen int) (string, error) {
+- b := make([]byte, byteLen)
+- _, err := rand.Read(b)
+- if err != nil {
+- return "", err
+- }
+- return hex.EncodeToString(b), nil
+-}
+diff --git a/pkg/util/watcher/dir_traversal.go b/pkg/util/watcher/dir_traversal.go
+deleted file mode 100644
+index 739ecf7..0000000
+--- a/pkg/util/watcher/dir_traversal.go
++++ /dev/null
+@@ -1,60 +0,0 @@
+-package watcher
+-
+-import (
+- "os"
+- "path/filepath"
+- "strings"
+-)
+-
+-func (w *FileWatcher) getFiles() ([]string, error) {
+- return WalkDirectoryForFiles(w.dir, w.opts.ValidExtensions, w.opts.ExcludeDirs)
+-}
+-
+-func (w *FileWatcher) validFile(path string) bool {
+- return isValidFile(path, w.opts.ValidExtensions, w.opts.ExcludeDirs)
+-}
+-
+-// WalkDirectoryForFiles discovers all subdirectories and
+-// returns a list of valid files in them
+-func WalkDirectoryForFiles(dir string, validExts, excludeDirs []string) (files []string, err error) {
+- err = filepath.Walk(dir,
+- func(path string, info os.FileInfo, err error) error {
+- if err != nil {
+- return err
+- }
+-
+- if !info.IsDir() {
+- // Only include valid files
+- if isValidFile(path, validExts, excludeDirs) {
+- files = append(files, path)
+- }
+- }
+-
+- return nil
+- })
+-
+- return
+-}
+-
+-// isValidFile is used to filter out all unsupported
+-// files based on if their extension is unknown or
+-// if their path contains an excluded directory
+-func isValidFile(path string, validExts, excludeDirs []string) bool {
+- parts := strings.Split(filepath.Clean(path), string(os.PathSeparator))
+- ext := filepath.Ext(parts[len(parts)-1])
+- for _, suffix := range validExts {
+- if ext == suffix {
+- return true
+- }
+- }
+-
+- for i := 0; i < len(parts)-1; i++ {
+- for _, exclude := range excludeDirs {
+- if parts[i] == exclude {
+- return false
+- }
+- }
+- }
+-
+- return false
+-}
+diff --git a/pkg/util/watcher/event.go b/pkg/util/watcher/event.go
+deleted file mode 100644
+index 4da933d..0000000
+--- a/pkg/util/watcher/event.go
++++ /dev/null
+@@ -1,64 +0,0 @@
+-package watcher
+-
+-import (
+- "fmt"
+- "strings"
+-)
+-
+-// FileEvent is an enum describing a change in a file's state
+-type FileEvent byte
+-
+-const (
+- FileEventNone FileEvent = iota // 0
+- FileEventModify // 1
+- FileEventDelete // 2
+- FileEventMove // 3
+-)
+-
+-func (e FileEvent) String() string {
+- switch e {
+- case 0:
+- return "NONE"
+- case 1:
+- return "MODIFY"
+- case 2:
+- return "DELETE"
+- case 3:
+- return "MOVE"
+- }
+-
+- return "UNKNOWN"
+-}
+-
+-// FileEvents is a slice of FileEvents
+-type FileEvents []FileEvent
+-
+-var _ fmt.Stringer = FileEvents{}
+-
+-func (e FileEvents) String() string {
+- strs := make([]string, 0, len(e))
+- for _, ev := range e {
+- strs = append(strs, ev.String())
+- }
+-
+- return strings.Join(strs, ",")
+-}
+-
+-func (e FileEvents) Bytes() []byte {
+- b := make([]byte, 0, len(e))
+- for _, event := range e {
+- b = append(b, byte(event))
+- }
+-
+- return b
+-}
+-
+-// FileUpdates is a slice of FileUpdate pointers
+-type FileUpdates []*FileUpdate
+-
+-// FileUpdate is used by watchers to
+-// signal the state change of a file.
+-type FileUpdate struct {
+- Event FileEvent
+- Path string
+-}
diff --git a/framing.md b/framing.md
new file mode 100644
index 00000000..27862399
--- /dev/null
+++ b/framing.md
@@ -0,0 +1,318 @@
+# Framing
+
+A frame is a chunk of serialized bytes representing exactly one object that can be decoded into a Go struct.
+
+The framing package lives in `github.com/weaveworks/libgitops/pkg/frame`. It provides YAML and JSON framing by default, but is extensible to other content types as well.
+
+A valid frame should not contain any frame separators (e.g. `---` for YAML), and must not be empty. A frame (and `frame.Reader` or `frame.Writer`) is content-type specific, where the content type is e.g. YAML or JSON.
+
+The source/destination byte stream being "framed" by a `frame.Reader` or `frame.Writer` can be, for example, a file, `/dev/std{in,out,err}`, an HTTP request, or a Go `string`/`[]byte`.
+
+> Note that the “frames” and “framer” terminology is borrowed from [`k8s.io/apimachinery`](TODO). A frame maps to the YAML 1.2 spec definition of a “document”, as described below.
+
+## Goals
+
+TODO
+
+## Noteworthy interfaces
+
+TODO
+
+## Default implementations
+
+- `frame.DefaultFactory()` gives you a combined `frame.ReaderFactory` and `frame.WriterFactory` that supports JSON and YAML.
+
+## Examples
+
+### YAML vs JSON frames
+
+This YAML stream contains two frames, i.e. two [YAML documents](https://yaml.org/spec/1.2/spec.html#id2800132):
+
+```yaml
+---
+# Frame 1
+foo: bar
+bla: true
+---
+# Frame 2
+bar: 123
+---
+```
+
+The same list of frames would be represented in JSON as follows:
+
+```json
+{
+ "foo": "bar",
+ "bla": true
+}
+{
+ "bar": 123
+}
+```
+
+An interesting observation about JSON is that it's "self-framing". The JSON decoder in Go can figure out where an object starts and ends, hence there's no need for extra frame separators, like in YAML.
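+
+To make this concrete, here is a minimal sketch using only `encoding/json` from the standard library (independent of this package) that reads the two frames above, in compact form, from a single stream:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+)
+
+func main() {
+	// The two JSON frames from above, in compact form.
+	stream := `{"foo": "bar", "bla": true}
+{"bar": 123}`
+
+	// json.Decoder reads one top-level JSON value at a time, so each
+	// Decode call consumes exactly one frame from the stream.
+	dec := json.NewDecoder(strings.NewReader(stream))
+	for {
+		var frame map[string]interface{}
+		if err := dec.Decode(&frame); err == io.EOF {
+			break
+		} else if err != nil {
+			panic(err)
+		}
+		fmt.Println(frame)
+	}
+}
+```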
+
+### Matching a Go struct
+
+"Decodable into a Go struct" means that for the example above, the first frame returned by a framer is:
+
+```yaml
+# Frame 1
+foo: bar
+bla: true
+```
+
+and the second frame is:
+
+```yaml
+# Frame 2
+bar: 123
+```
+
+And this serialized content matches the following Go structs:
+
+```go
+type T1 struct {
+ Foo string `json:"foo"`
+ Bla bool `json:"bla"`
+}
+
+type T2 struct {
+ Bar int64 `json:"bar"`
+}
+```
+
+Now you might ask yourself: if you look at a generic frame returned from the example above, how do you figure out whether it should be decoded into `T1` or `T2`, or any other type?
+
+One quick idea would be to annotate the serialized byte representation with some metadata about what content the frame describes. For example, there could be a `kind` field specifying `T1` and `T2` above, respectively.
+
+This is one of the reasons why Kubernetes has [Group, Version and Kinds](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#types-kinds).
+
+But with only a `kind` field, naming conflicts would be very easy to create, since the whole software ecosystem would have to agree on who allocates which `kind`s.
+
+For example, there could be `kind: Cluster`, but without any logical grouping you wouldn't know whether it refers to an etcd, MySQL or Kubernetes cluster.
+
+This is why `group`s exist in Kubernetes as well. The `apiVersion` field of most Kubernetes-like objects is actually of the form `group/version`. (The exception is `apiVersion: v1`, which has `group == ""` (also known as `core`) and `version == "v1"`.)
+
+In short, the `group` serves as a virtual "namespace" for what the `kind` refers to, while the `version` specifies the schema of the given object. The `version` is very important, as it allows your schema to evolve over time.
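+
+As an aside, `k8s.io/apimachinery` (which this module already depends on) provides a helper for splitting `apiVersion` into its group and version parts; a small sketch:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+func main() {
+	// "my-replicated-db.com/v1alpha1" => group "my-replicated-db.com", version "v1alpha1"
+	gvk := schema.FromAPIVersionAndKind("my-replicated-db.com/v1alpha1", "Database")
+	fmt.Println(gvk.Group, gvk.Version, gvk.Kind)
+
+	// The "core" group special case: "v1" => group "", version "v1"
+	gvk = schema.FromAPIVersionAndKind("v1", "Service")
+	fmt.Println(gvk.Group == "", gvk.Version, gvk.Kind)
+}
+```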
+
+For example, imagine some kind of distributed database with the following initial schema:
+
+```yaml
+apiVersion: my-replicated-db.com/v1alpha1
+kind: Database
+spec:
+  isReplicated: true # A simple boolean telling whether the database should be replicated
+```
+
+(by convention, versioning starts from `v1alpha1`, that is, "the first alpha release of the first schema version")
+
+Over time, you realize that you actually need to specify _how_ many replicas there should be, so you release `v1alpha2` ("the second alpha release of the first schema version"):
+
+```yaml
+apiVersion: my-replicated-db.com/v1alpha2
+kind: Database
+spec:
+  replicas: 3 # How many replicas should the database use?
+```
+
+Later, you realize that there is a need to distinguish between read and write replicas, hence you change the schema once again. But as you feel confident in this design, you upgrade the schema to `v1beta1` ("the first beta release of the first schema version"):
+
+```yaml
+apiVersion: my-replicated-db.com/v1beta1
+kind: Database
+spec:
+  replicas: # How many read/write replicas should the database use?
+ read: 3
+ write: 1
+```
+
+Thanks to specifying the `version` as well, your application can support decoding all three different versions of the objects, as long as you include the corresponding Go structs for all three versions in your Go code.
+
+For now, we don't need to dive into exactly how to decode the frames, but it's important to note that, for this reason, each frame should specify `apiVersion` and `kind`. With this, the example would look like:
+
+```yaml
+# Frame 1
+apiVersion: foo.com/v1
+kind: T1
+foo: bar
+bla: true
+```
+
+```yaml
+# Frame 2
+apiVersion: foo.com/v1
+kind: T2
+bar: 123
+```
+
+> Note: The struct name and the `kind` don't necessarily need to match, but matching them is by far the most popular convention.
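+
+How might a consumer use `apiVersion` and `kind` to route a frame to the right struct? Below is a minimal sketch using `sigs.k8s.io/yaml`; the two-pass routing and the package name are illustrative only, not necessarily how this library decodes:
+
+```go
+package frames // hypothetical package, for illustration only
+
+import (
+	"fmt"
+
+	"sigs.k8s.io/yaml"
+)
+
+// typeMeta mirrors the common apiVersion/kind header of a frame.
+type typeMeta struct {
+	APIVersion string `json:"apiVersion"`
+	Kind       string `json:"kind"`
+}
+
+type T1 struct {
+	Foo string `json:"foo"`
+	Bla bool   `json:"bla"`
+}
+
+type T2 struct {
+	Bar int64 `json:"bar"`
+}
+
+// DecodeFrame first peeks at the frame's type information, then decodes
+// the frame into the Go struct matching that kind. (A real implementation
+// would also check meta.APIVersion.)
+func DecodeFrame(frame []byte) (interface{}, error) {
+	var meta typeMeta
+	if err := yaml.Unmarshal(frame, &meta); err != nil {
+		return nil, err
+	}
+	switch meta.Kind {
+	case "T1":
+		obj := &T1{}
+		return obj, yaml.Unmarshal(frame, obj)
+	case "T2":
+		obj := &T2{}
+		return obj, yaml.Unmarshal(frame, obj)
+	default:
+		return nil, fmt.Errorf("unknown kind %q", meta.Kind)
+	}
+}
+```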
+
+### Empty Frames
+
+Empty frames must be ignored, because they are not decodable; they don't map to exactly one Go struct.
+
+To illustrate, the following YAML file contains 2 frames:
+
+```yaml
+
+---
+
+---
+
+# Frame 1
+apiVersion: foo.com/v1
+kind: T1
+foo: bar
+bla: true
+
+---
+
+
+---
+
+# Frame 2
+apiVersion: foo.com/v1
+kind: T2
+bar: 123
+
+---
+```
+
+TODO: Investigate what happens (or should happen) if there are only comments in a frame. One thing that could be caught in the sanitation process is if the top-level document doesn't have any children. However, shall we support retaining that comment-only frame?
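+
+The minimal emptiness check is whitespace-based; catching comment-only frames (per the TODO above) would additionally need YAML-aware sanitation. A sketch, with a hypothetical package name:
+
+```go
+package frameutil // hypothetical package, for illustration only
+
+import "bytes"
+
+// IsEmptyFrame reports whether a frame contains no content at all
+// (only whitespace), and hence must be skipped rather than decoded.
+// Note: this deliberately does not catch comment-only frames.
+func IsEmptyFrame(frame []byte) bool {
+	return len(bytes.TrimSpace(frame)) == 0
+}
+```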
+
+### Lists
+
+As per the [Kubernetes API conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#types-kinds), there are "special" kinds with the `List` suffix that contain multiple objects _within the same frame_.
+
+These lists are useful in the REST communication between `kubectl` and the API server, for example. If you want to get a set of same-kind items from the API server, you'd invoke an HTTP request along the lines of:
+
+```http
+GET /api/v1/namespaces/default/services
+```
+
+and get a response of the form:
+
+```json
+{
+ "kind": "ServiceList",
+ "apiVersion": "v1",
+ "metadata": {
+ "resourceVersion": "606"
+ },
+ "items": [
+ {
+ "metadata": {
+ "name": "kubernetes",
+ "namespace": "default",
+ "labels": {
+ "component": "apiserver",
+ "provider": "kubernetes"
+ }
+ },
+ "spec": {
+ "clusterIP": "10.96.0.1",
+ },
+ "status": {}
+ }
+ ]
+}
+```
+
+(this can be tested with `kubectl get --raw=/api/v1/namespaces/default/services | jq .`)
+
+Why bother returning a `kind: ServiceList` instead of a set of `kind: Service` objects, separated as JSON frames as demonstrated above?
+
+The answer is: a need for returning metadata about the response itself. For example, we can see here that `.metadata.resourceVersion` of the `ServiceList` is set. Other examples of list metadata are pagination headers and related information, in case the returned list is too large to return in a single request.
+
+This may seem specific to REST communication, and it largely is. However, it also provides a nice feature for controllers.
+
+The Go struct for a typed list (like `ServiceList`) looks something like this:
+
+```go
+// From https://github.com/kubernetes/api/blob/v0.21.1/core/v1/types.go#L4423
+type ServiceList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Service `json:"items"`
+}
+```
+
+If I, as a controller developer, want to ask for a list of services, what do I do when using e.g. the `controller-runtime` [`Client`](https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/client#Reader)?
+
+The answer is to allocate an empty `ServiceList` and pass a pointer to it, as follows, to get the data:
+
+```go
+var svclist v1.ServiceList
+err := client.List(ctx, &svclist)
+// svclist.Items is now populated with all returned Services at page 1
+...
+// If the list of services is larger than the allowed response size, only a
+// fraction of the services will be returned by the first call. But because
+// the first List call populated the list's metadata with information about
+// which page to ask for next, one can simply call List again to get the
+// next page.
+err = client.List(ctx, &svclist)
+// consume more Services at page 2
+```
+
+What is useful here is that `svclist.Items` is of type `[]v1.Service` by definition. There is no need to cast generic objects to Services before using them. Additionally, if the list contained something other than a `Service`, the decoder would fail with an error.
+
+These are the existing advantages of using a `List`; they are documented here for additional context.
+
+Because both JSON and YAML support multiple frames, there is technically no direct need to use a `List` in e.g. files checked into Git, provided that the application reading the byte stream supports framing. If the reading application does not support YAML/JSON framing, using a `List` that can be decoded directly is convenient.
+
+This leads to the conclusion that the following YAML file should be treated as valid:
+
+```yaml
+---
+apiVersion: cluster.x-k8s.io/v1alpha4
+kind: MachineList
+items:
+- apiVersion: cluster.x-k8s.io/v1alpha4
+ kind: Machine
+ spec:
+ clusterName: "my-cluster"
+- apiVersion: cluster.x-k8s.io/v1alpha4
+ kind: Machine
+ spec:
+ clusterName: "other-cluster"
+---
+---
+apiVersion: cluster.x-k8s.io/v1alpha4
+kind: Machine
+spec:
+ clusterName: "other-cluster"
+---
+```
+
+How many valid frames are there in the above YAML stream? Two: one `List` and one "normal" object (the empty frame in between is skipped).
+
+From a framing point of view, we don't know anything about what a `List` is, but it satisfies the contract defined above of being decodable into a single Go struct.
+
+### Limiting Frame Size and Count
+
+If you read a byte stream whose size you're unaware of, e.g. when reading from `/dev/stdin` or an HTTP request, you don't want to open yourself up to a situation where you read garbage forever, sent by a malicious actor. This represents a Denial of Service (DoS) attack vector for your application.
+
+To mitigate that, the builtin `frame.Reader` (and `frame.Writer`, though that matters less, as the bytes are already in memory) has options to limit the size (byte count) of each frame, as well as the total frame count.
+
+The default maximum frame size is 3 MB, which matches the Kubernetes API server's default maximum request body size.
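+
+Here is a sketch of how such a size cap can be enforced for a single frame's bytes; the names and the exact mechanism are illustrative, not necessarily what `frame.Reader` does internally:
+
+```go
+package frameutil // hypothetical package, for illustration only
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+// DefaultMaxFrameSize is 3 MB, matching the Kubernetes API server's
+// default maximum request body size.
+const DefaultMaxFrameSize = 3 * 1024 * 1024
+
+// ReadFrameCapped reads one frame's bytes from r, refusing frames
+// larger than maxBytes instead of buffering unbounded input.
+func ReadFrameCapped(r io.Reader, maxBytes int64) ([]byte, error) {
+	// Read at most maxBytes+1 bytes; seeing more than maxBytes
+	// means the frame is too large.
+	data, err := ioutil.ReadAll(io.LimitReader(r, maxBytes+1))
+	if err != nil {
+		return nil, err
+	}
+	if int64(len(data)) > maxBytes {
+		return nil, fmt.Errorf("frame exceeds maximum size of %d bytes", maxBytes)
+	}
+	return data, nil
+}
+```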
+
+### Recognizing Readers/Writers
+
+TODO
+
+TODO: We should maybe allow YAML as in "JSON with comments". How to auto-recognize?
+
+```yaml
+# This is valid YAML, but invalid JSON, due to these comments
+# This works, because YAML is a superset of JSON, and hence one
+# can use any valid JSON file, with YAML "extensions" like comments.
+{
+ # Comment
+ "foo": "bar" # Comment
+}
+```
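+
+One naive approach to auto-recognition (a sketch toward the TODO above, not this package's actual behavior) is to treat the input as JSON only if it is strict JSON, and otherwise fall back to YAML, since YAML is a superset of JSON:
+
+```go
+package frameutil // hypothetical package, for illustration only
+
+import "encoding/json"
+
+// ContentType is a simple stand-in for this package's content type notion.
+type ContentType string
+
+const (
+	ContentTypeJSON ContentType = "application/json"
+	ContentTypeYAML ContentType = "application/yaml"
+)
+
+// RecognizeContentType guesses the content type of a document.
+// Anything that is not strict JSON (e.g. JSON with comments, as above)
+// is treated as YAML.
+func RecognizeContentType(doc []byte) ContentType {
+	if json.Valid(doc) {
+		return ContentTypeJSON
+	}
+	return ContentTypeYAML
+}
+```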
+
+### Single Readers/Writers
+
+TODO (Any content type)
diff --git a/go.mod b/go.mod
index c03013fb..dbc33a44 100644
--- a/go.mod
+++ b/go.mod
@@ -1,31 +1,42 @@
module github.com/weaveworks/libgitops
-go 1.14
+go 1.15
-replace (
- github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
- github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.3.0
-)
+replace github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible
require (
- github.com/fluxcd/go-git-providers v0.0.2
- github.com/fluxcd/toolkit v0.0.1-beta.2
- github.com/go-git/go-git/v5 v5.1.0
- github.com/go-openapi/spec v0.19.8
+ github.com/evanphx/json-patch v4.11.0+incompatible
+ github.com/fluxcd/go-git-providers v0.2.0
+ github.com/fluxcd/pkg/ssh v0.2.0
+ github.com/go-git/go-git/v5 v5.4.2
+ github.com/go-logr/logr v0.4.0
+ github.com/go-openapi/spec v0.20.3
+ github.com/google/btree v1.0.1
github.com/google/go-github/v32 v32.1.0
github.com/labstack/echo v3.3.10+incompatible
github.com/labstack/gommon v0.3.0 // indirect
- github.com/mattn/go-isatty v0.0.12 // indirect
+ github.com/mattn/go-isatty v0.0.13 // indirect
github.com/mitchellh/go-homedir v1.1.0
github.com/rjeczalik/notify v0.9.2
- github.com/sirupsen/logrus v1.6.0
+ github.com/sirupsen/logrus v1.8.1
+ github.com/spf13/afero v1.6.0
github.com/spf13/pflag v1.0.5
- github.com/stretchr/testify v1.6.1
- golang.org/x/net v0.0.0-20200625001655-4c5254603344 // indirect
- golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d
- k8s.io/apimachinery v0.18.6
- k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6
- sigs.k8s.io/controller-runtime v0.6.0
- sigs.k8s.io/kustomize/kyaml v0.1.11
+ github.com/stretchr/testify v1.7.0
+ go.opentelemetry.io/otel v1.0.0-RC2
+ go.opentelemetry.io/otel/exporters/jaeger v1.0.0-RC2
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC2
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.0.0-RC2
+ go.opentelemetry.io/otel/sdk v1.0.0-RC2
+ go.opentelemetry.io/otel/trace v1.0.0-RC2
+ go.uber.org/atomic v1.7.0
+ go.uber.org/multierr v1.6.0
+ go.uber.org/zap v1.18.1
+ golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
+ k8s.io/apimachinery v0.21.3
+ k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d
+ k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471
+ sigs.k8s.io/cluster-api v0.4.0
+ sigs.k8s.io/controller-runtime v0.9.5
+ sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738
sigs.k8s.io/yaml v1.2.0
)
diff --git a/go.sum b/go.sum
index c1ecf376..0bb87307 100644
--- a/go.sum
+++ b/go.sum
@@ -1,173 +1,215 @@
-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
-github.com/360EntSecGroup-Skylar/excelize v1.4.1/go.mod h1:vnax29X2usfl7HHkBrX5EvSCJcmH3dT9luvxzu8iGAE=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
+cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
+cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
+cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
+cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
+cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
+cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
+cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
+cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
+cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
+cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
+cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
+cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
+cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
+cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
+cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
+cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
+cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
+cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
-github.com/Masterminds/sprig/v3 v3.0.2/go.mod h1:oesJ8kPONMONaZgtiHNzUShJbksypC5kWczhZAf6+aU=
-github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
+github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
-github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg=
-github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs=
-github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ=
+github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2 h1:JCHLVE3B+kJde7bIEo5N4J+ZbLhp0J1Fs+ulyRws4gE=
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
+github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
+github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
-github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
-github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
+github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
-github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
-github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=
+github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ=
+github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
-github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
-github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY=
-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0=
+github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
+github.com/coredns/corefile-migration v1.0.12 h1:TJGATo0YLQJVIKJZLajXE1IrhRFtYTR1cYsGIT1YNEk=
+github.com/coredns/corefile-migration v1.0.12/go.mod h1:NJOI8ceUF/NTgEwtjD+TUq3/BnH/GF7WAM3RzCa3hBo=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
-github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
-github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/drone/envsubst/v2 v2.0.0-20210615175204-7bf45dbf5372/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/dustmop/soup v1.1.2-0.20190516214245-38228baa104e/go.mod h1:CgNC6SGbT+Xb8wGGvzilttZL1mc5sQ/5KkcxsZttMIk=
-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
+github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fluxcd/go-git-providers v0.0.2 h1:NGJeJl1TOJKbxtQkRL9JOk5lIopR1XNi6hGgZC5+8IE=
-github.com/fluxcd/go-git-providers v0.0.2/go.mod h1:2Fp9GDxIcllNR7pm5clXhInPyue4VggecaH83KhkpNw=
-github.com/fluxcd/kustomize-controller v0.0.1-beta.2/go.mod h1:mLeipvpQkyof6b5IHNtqeA8CmbjfVIf92UyKkpeBY98=
-github.com/fluxcd/source-controller v0.0.1-beta.2/go.mod h1:tmscNdCxEt7+Xt2g1+bI38hMPw2leYMFAaCn4UlMGuw=
-github.com/fluxcd/toolkit v0.0.1-beta.2 h1:JG80AUIGd936QJ6Vs/xZweoKcE6j7Loua5Wn6Q/pVh8=
-github.com/fluxcd/toolkit v0.0.1-beta.2/go.mod h1:NqDXj2aeVMbVkrCHeP/r0um+edXXyeGlG/9pKZLqGdM=
+github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fluxcd/go-git-providers v0.2.0 h1:2dxT4r9UDjKwsNFmO9wcSR2FUqKyvsDwha5b/zvK1Ko=
+github.com/fluxcd/go-git-providers v0.2.0/go.mod h1:nRgNpHZmZhrsyNSma1JcAhjUG9xrqMGJcIUr9K7M7vk=
+github.com/fluxcd/pkg/ssh v0.2.0 h1:e9V+HReOL7czm7edVzYS1e+CnFKz1/kHiUNfLRpBdH8=
+github.com/fluxcd/pkg/ssh v0.2.0/go.mod h1:EpQC7Ztdlbi8S/dlYXqVDZtHtLpN3FNl3N6zWujVzbA=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
-github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA=
github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
-github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM=
-github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
-github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp/pqnefH+Bc=
-github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
-github.com/go-git/go-git/v5 v5.0.0/go.mod h1:oYD8y9kWsGINPFJoLdaScGCN6dlKg23blmClfZwtUVA=
-github.com/go-git/go-git/v5 v5.1.0 h1:HxJn9g/E7eYvKW3Fm7Jt4ee8LXfPOm/H1cdDu8vEssk=
-github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM=
+github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
+github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
+github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
+github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
+github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
+github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
+github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
-github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
+github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54=
-github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM=
+github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@@ -176,22 +218,18 @@ github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1 h1:wSt/4CYxs70xbATrGXhokKF1i0tZjENLOo1ioIO13zk=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9 h1:tF+augKRWlWx0J0B7ZyyKSiTyV6E1zZe+7b3qQlcEf8=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
@@ -200,128 +238,129 @@ github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCs
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501 h1:C1JKChikHGpXwT5UQDFaryIpDtyyGL/CR6C2kB7F1oc=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
-github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw=
github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
-github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg=
-github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ=
+github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg=
github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87 h1:zP3nY8Tk2E6RTkqGYrarZXuzh+ffyLDljLxCy1iJw80=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
-github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
-github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
-github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
-github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg=
-github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
-github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
-github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk=
-github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
-github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks=
-github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
-github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
-github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
-github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
-github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY=
+github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
+github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
-github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
-github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
-github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
-github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
-github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0=
-github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
-github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o=
-github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU=
-github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/golangci-lint v1.21.0/go.mod h1:phxpHK52q7SE+5KpPnti4oZTdFCEsn/tKN+nFvCKXfk=
-github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU=
-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
-github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI=
-github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4=
-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
-github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho=
-github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-github/v32 v32.0.0 h1:q74KVb22spUq0U5HqZ9VCYqQz8YRuOtL/39ZnfwO+NM=
-github.com/google/go-github/v32 v32.0.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II=
github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI=
+github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg=
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
-github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
-github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0=
-github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
+github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA=
@@ -331,61 +370,74 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
-github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I=
-github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
+github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
-github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
-github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
-github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
+github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg=
-github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=
-github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck=
+github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
-github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
-github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
@@ -394,113 +446,110 @@ github.com/labstack/echo v3.3.10+incompatible h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8
github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s=
github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
-github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a h1:TpvdAwDAt1K4ANVOfcihouRdvP+MgAfDWwBuct4l6ZY=
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/manifoldco/promptui v0.7.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ=
-github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s=
+github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI=
+github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A=
+github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=
+github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
-github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
+github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
+github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
-github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
-github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk=
+github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0=
+github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
-github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
-github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34=
-github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
-github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
-github.com/paulmach/orb v0.1.3/go.mod h1:VFlX/8C+IQ1p6FTRRKzKoOPJnvEtA5G0Veuqwbu//Vk=
+github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
+github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI=
+github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
-github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
+github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -509,320 +558,569 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
+github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
-github.com/qri-io/starlib v0.4.2-0.20200213133954-ff2e8cd5ef8d/go.mod h1:7DPO4domFU579Ga6E61sB9VFNaniPVwJP5C4bBCu3wA=
-github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI=
-github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8=
github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc=
-github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
-github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
-github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
-github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
-github.com/sosedoff/gitkit v0.2.1-0.20191202022816-7182d43c6254/go.mod h1:A+o6ZazfVJwetlcHz3ah6th66XcBdsyzLo+aBt/AsK4=
-github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/spf13/viper v1.8.0/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
-github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.2.3-0.20181224173747-660f15d67dbb/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
-github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
-github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
-github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
-github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s=
github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8=
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
-github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4=
-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
-github.com/xanzy/go-gitlab v0.33.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
-github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70=
-github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
-github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xanzy/go-gitlab v0.43.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug=
+github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI=
+github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
-github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
+github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI=
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yujunz/go-getter v1.4.1-lite/go.mod h1:sbmqxXjyLunH1PkF3n7zSlnVeMvmYUuIl9ZVs/7NyCc=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
-go.starlark.net v0.0.0-20190528202925-30ae18b8564f/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/otel v1.0.0-RC2 h1:SHhxSjB+omnGZPgGlKe+QMp3MyazcOHdQ8qwo89oKbg=
+go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM=
+go.opentelemetry.io/otel/exporters/jaeger v1.0.0-RC2 h1:RF0nWsIDpDBe+s06lkLxUw9CWQUAhO6hBSxxB7dz45s=
+go.opentelemetry.io/otel/exporters/jaeger v1.0.0-RC2/go.mod h1:sZZqN3Vb0iT+NE6mZ1S7sNyH3t4PFk6ElK5TLGFBZ7E=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.0-RC2 h1:Z/91DSYkOqnVuECrd+hxCU9lzeo5Fihjp28uq0Izfpw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.0-RC2/go.mod h1:T+s8GKi1OqMwPuZ+ouDtZW4vWYpJuzIzh2Matq4Jo9k=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC2 h1:PaSlrCE+hRbamroLGGgFDmzDamCxp7ID+hBvPmOhcSc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC2/go.mod h1:3shayJIFcDqHi9/GT2fAHyMI/bRgc6FO0CAkhaDkhi0=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.0.0-RC2 h1:crksoFyTPDDywRJDUW36OZma+C3HhcYwQLPUZZMXFO0=
+go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.0.0-RC2/go.mod h1:6kVxj1C/f3irP/IeeZNbcEwbg3rwnM6a7bCrcGbIJeI=
+go.opentelemetry.io/otel/sdk v1.0.0-RC2 h1:ROuteeSCBaZNjiT9JcFzZepmInDvLktR28Y6qKo8bCs=
+go.opentelemetry.io/otel/sdk v1.0.0-RC2/go.mod h1:fgwHyiDn4e5k40TD9VX243rOxXR+jzsWBZYA2P5jpEw=
+go.opentelemetry.io/otel/trace v1.0.0-RC2 h1:dunAP0qDULMIT82atj34m5RgvsIK6LcsXf1c/MsYg1w=
+go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4=
+go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4=
+go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
+go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
+go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4=
+go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
-golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM=
-golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b h1:7mWr3k41Qtv8XlltBkDkl8LoP3mpSgBW8BUoxtEdbXg=
+golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
-golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
-golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1 h1:x622Z2o4hgCr/4CiKWc51jHVKaWdtVpBNmEI8wI9Qns=
+golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d h1:QQrM/CCYEzTs91GZylDCQjGHudbPTxF/1fvXdVh5lMo=
-golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
-golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
-golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20191010075000-0337d82405ff h1:XdBG6es/oFDr1HwaxkxgVve7NB281QhxgK/i4voubFs=
-golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0=
-gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
-gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY=
+gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
-google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI=
+google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
-google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
+google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
+google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
+google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -832,96 +1130,88 @@ gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2 h1:XZx7nhd5GMaZpmDaEHFVafUZC7ya0fuo7cSJ3UCKYmM=
-gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
-helm.sh/helm/v3 v3.1.2/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g=
+gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
+gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
+gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
-k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4=
-k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8=
-k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78=
-k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs=
-k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8=
-k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY=
-k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
-k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
-k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA=
-k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
-k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag=
-k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
-k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo=
-k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw=
-k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI=
-k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
-k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI=
-k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE=
-k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU=
-k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
-k8s.io/code-generator v0.18.2 h1:C1Nn2JiMf244CvBDKVPX0W2mZFJkVBg54T8OV7/Imso=
-k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
-k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs=
-k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y=
-k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM=
-k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
-k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
-k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
-k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
-k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
-k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk=
-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw=
-k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
-k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
-k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
-modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
-modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
-modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
-modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
-modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
-mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw=
-rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
-sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8=
-sigs.k8s.io/controller-runtime v0.6.0 h1:Fzna3DY7c4BIP6KwfSlrfnj20DJ+SeMBK8HSFvOk9NM=
-sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo=
-sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0=
-sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU=
-sigs.k8s.io/kustomize/api v0.4.1/go.mod h1:NqxqT+wbYHrD0P19Uu4dXiMsVwI1IwQs+MJHlLhmPqQ=
-sigs.k8s.io/kustomize/kyaml v0.1.11 h1:/VvWxVIgH5gG1K4A7trgbyLgO3tRBiAWNhLFVU1HEmo=
-sigs.k8s.io/kustomize/kyaml v0.1.11/go.mod h1:72/rLkSi+L/pHM1oCjwrf3ClU+tH5kZQvvdLSqIHwWU=
-sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
-sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU=
-sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
-sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
-sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
-sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
-sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
+honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU=
+k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ=
+k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg=
+k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA=
+k8s.io/apiextensions-apiserver v0.21.3 h1:+B6biyUWpqt41kz5x6peIsljlsuwvNAp/oFax/j2/aY=
+k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE=
+k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM=
+k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII=
+k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI=
+k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw=
+k8s.io/apiserver v0.21.3 h1:QxAgE1ZPQG5cPlHScHTnLxP9H/kU3zjH1Vnd8G+n5OI=
+k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU=
+k8s.io/cli-runtime v0.21.2/go.mod h1:8u/jFcM0QpoI28f6sfrAAIslLCXUYKD5SsPPMWiHYrI=
+k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA=
+k8s.io/client-go v0.21.3 h1:J9nxZTOmvkInRDCzcSNQmPJbDYN/PjlxXT9Mos3HcLg=
+k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU=
+k8s.io/cluster-bootstrap v0.21.2 h1:GXvCxl619A0edhAprX8U5gUZ5lQCUf7xhDa7SkXnlx0=
+k8s.io/cluster-bootstrap v0.21.2/go.mod h1:OEm/gajtWz/ohbS4NGxkyTp/6f1fW3TBThgCQ1ljhHo=
+k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U=
+k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo=
+k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc=
+k8s.io/component-base v0.21.3 h1:4WuuXY3Npa+iFfi2aDRiOz+anhNvRfye0859ZgfC5Og=
+k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ=
+k8s.io/component-helpers v0.21.2/go.mod h1:DbyFt/A0p6Cv+R5+QOGSJ5f5t4xDfI8Yb89a57DgJlQ=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
+k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
+k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
+k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d h1:lUK8GPtuJy8ClWZhuvKoaLdKGPLq9H1PxWp7VPBZBkU=
+k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kubectl v0.21.2/go.mod h1:PgeUclpG8VVmmQIl8zpLar3IQEpFc9mrmvlwY3CK1xo=
+k8s.io/metrics v0.21.2/go.mod h1:wzlOINZMCtWq8dR9gHlyaOemmYlOpAoldEIXE82gAhI=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 h1:DnzUXII7sVg1FJ/4JX6YDRJfLNAC7idRatPwe07suiI=
+k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/cluster-api v0.4.0 h1:y9MxtU1uW9r9JtDyOQ/9BRXZEau2PGl2yOIozaxXO0E=
+sigs.k8s.io/cluster-api v0.4.0/go.mod h1:9ALETQ/6KGZ/kYiqvQGfjOx0CfVGE39d4VP3UrS5B24=
+sigs.k8s.io/controller-runtime v0.9.1/go.mod h1:cTqsgnwSOsYS03XwySYZj8k6vf0+eC4FJRcCgQ9elb4=
+sigs.k8s.io/controller-runtime v0.9.5 h1:WThcFE6cqctTn2jCZprLICO6BaKZfhsT37uAapTNfxc=
+sigs.k8s.io/controller-runtime v0.9.5/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA=
+sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY=
+sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIMgYrEDfwOeFdMK0=
+sigs.k8s.io/kustomize/kustomize/v4 v4.1.2/go.mod h1:PxBvo4WGYlCLeRPL+ziT64wBXqbgfcalOS/SXa/tcyo=
+sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg=
+sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738 h1:Nkg3viu9IE/TSzvYt4GGy5FkhdPk3bptXuxW5TnU9uo=
+sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
+sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
-sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
-vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI=
diff --git a/hack/generate-client.sh b/hack/generate-client.sh
deleted file mode 100755
index b7e5853d..00000000
--- a/hack/generate-client.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-
-SCRIPT_DIR=$( dirname "${BASH_SOURCE[0]}" )
-cd ${SCRIPT_DIR}/..
-
-RESOURCES="Car Motorcycle"
-CLIENT_NAME=SampleInternal
-OUT_DIR=cmd/sample-app/client
-API_DIR="github.com/weaveworks/libgitops/cmd/sample-app/apis/sample"
-mkdir -p ${OUT_DIR}
-for Resource in ${RESOURCES}; do
- resource=$(echo "${Resource}" | awk '{print tolower($0)}')
- sed -e "s|Resource|${Resource}|g;s|resource|${resource}|g;/build ignore/d;s|API_DIR|${API_DIR}|g;s|*Client|*${CLIENT_NAME}Client|g" \
- pkg/client/client_resource_template.go > \
- ${OUT_DIR}/zz_generated.client_${resource}.go
-done
diff --git a/pkg/client/client_dynamic.go b/pkg/client/client_dynamic.go
deleted file mode 100644
index 5f3ac2a4..00000000
--- a/pkg/client/client_dynamic.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// +build ignore
-
-package client
-
-import (
- "fmt"
-
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/storage"
- "github.com/weaveworks/libgitops/pkg/storage/filterer"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// DynamicClient is an interface for accessing API types generically
-type DynamicClient interface {
- // New returns a new Object of its kind
- New() runtime.Object
- // Get returns an Object matching the UID from the storage
- Get(runtime.UID) (runtime.Object, error)
- // Set saves an Object into the persistent storage
- Set(runtime.Object) error
- // Patch performs a strategic merge patch on the object with
- // the given UID, using the byte-encoded patch given
- Patch(runtime.UID, []byte) error
- // Find returns an Object based on the given filter, filters can
- // match e.g. the Object's Name, UID or a specific property
- Find(filter filterer.BaseFilter) (runtime.Object, error)
- // FindAll returns multiple Objects based on the given filter, filters can
- // match e.g. the Object's Name, UID or a specific property
- FindAll(filter filterer.BaseFilter) ([]runtime.Object, error)
- // Delete deletes an Object from the storage
- Delete(uid runtime.UID) error
- // List returns a list of all Objects available
- List() ([]runtime.Object, error)
-}
-
-// dynamicClient is a struct implementing the DynamicClient interface
-// It uses a shared storage instance passed from the Client together with its own Filterer
-type dynamicClient struct {
- storage storage.Storage
- gvk schema.GroupVersionKind
- filterer *filterer.Filterer
-}
-
-// NewDynamicClient builds the dynamicClient struct using the storage implementation and a new Filterer
-func NewDynamicClient(s storage.Storage, gvk schema.GroupVersionKind) DynamicClient {
- return &dynamicClient{
- storage: s,
- gvk: gvk,
- filterer: filterer.NewFilterer(s),
- }
-}
-
-// New returns a new Object of its kind
-func (c *dynamicClient) New() runtime.Object {
- obj, err := c.storage.New(c.gvk)
- if err != nil {
- panic(fmt.Sprintf("Client.New must not return an error: %v", err))
- }
- return obj
-}
-
-// Get returns an Object based the given UID
-func (c *dynamicClient) Get(uid runtime.UID) (runtime.Object, error) {
- return c.storage.Get(c.gvk, uid)
-}
-
-// Set saves an Object into the persistent storage
-func (c *dynamicClient) Set(resource runtime.Object) error {
- return c.storage.Set(c.gvk, resource)
-}
-
-// Patch performs a strategic merge patch on the object with
-// the given UID, using the byte-encoded patch given
-func (c *dynamicClient) Patch(uid runtime.UID, patch []byte) error {
- return c.storage.Patch(c.gvk, uid, patch)
-}
-
-// Find returns an Object based on a given Filter
-func (c *dynamicClient) Find(filter filterer.BaseFilter) (runtime.Object, error) {
- return c.filterer.Find(c.gvk, filter)
-}
-
-// FindAll returns multiple Objects based on a given Filter
-func (c *dynamicClient) FindAll(filter filterer.BaseFilter) ([]runtime.Object, error) {
- return c.filterer.FindAll(c.gvk, filter)
-}
-
-// Delete deletes the Object from the storage
-func (c *dynamicClient) Delete(uid runtime.UID) error {
- return c.storage.Delete(c.gvk, uid)
-}
-
-// List returns a list of all Objects available
-func (c *dynamicClient) List() ([]runtime.Object, error) {
- return c.storage.List(c.gvk)
-}
diff --git a/pkg/client/client_resource_template.go b/pkg/client/client_resource_template.go
deleted file mode 100644
index 53bc8741..00000000
--- a/pkg/client/client_resource_template.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// +build ignore
-
-/*
- Note: This file is autogenerated! Do not edit it manually!
- Edit client_resource_template.go instead, and run
- hack/generate-client.sh afterwards.
-*/
-
-package client
-
-import (
- "fmt"
-
- api "API_DIR"
-
- log "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/storage"
- "github.com/weaveworks/libgitops/pkg/storage/filterer"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// ResourceClient is an interface for accessing Resource-specific API objects
-type ResourceClient interface {
- // New returns a new Resource
- New() *api.Resource
- // Get returns the Resource matching given UID from the storage
- Get(runtime.UID) (*api.Resource, error)
- // Set saves the given Resource into persistent storage
- Set(*api.Resource) error
- // Patch performs a strategic merge patch on the object with
- // the given UID, using the byte-encoded patch given
- Patch(runtime.UID, []byte) error
- // Find returns the Resource matching the given filter, filters can
- // match e.g. the Object's Name, UID or a specific property
- Find(filter filterer.BaseFilter) (*api.Resource, error)
- // FindAll returns multiple Resources matching the given filter, filters can
- // match e.g. the Object's Name, UID or a specific property
- FindAll(filter filterer.BaseFilter) ([]*api.Resource, error)
- // Delete deletes the Resource with the given UID from the storage
- Delete(uid runtime.UID) error
- // List returns a list of all Resources available
- List() ([]*api.Resource, error)
-}
-
-// Resources returns the ResourceClient for the Client object
-func (c *Client) Resources() ResourceClient {
- if c.resourceClient == nil {
- c.resourceClient = newResourceClient(c.storage, c.gv)
- }
-
- return c.resourceClient
-}
-
-// resourceClient is a struct implementing the ResourceClient interface
-// It uses a shared storage instance passed from the Client together with its own Filterer
-type resourceClient struct {
- storage storage.Storage
- filterer *filterer.Filterer
- gvk schema.GroupVersionKind
-}
-
-// newResourceClient builds the resourceClient struct using the storage implementation and a new Filterer
-func newResourceClient(s storage.Storage, gv schema.GroupVersion) ResourceClient {
- return &resourceClient{
- storage: s,
- filterer: filterer.NewFilterer(s),
- gvk: gv.WithKind(api.KindResource.Title()),
- }
-}
-
-// New returns a new Object of its kind
-func (c *resourceClient) New() *api.Resource {
- log.Tracef("Client.New; GVK: %v", c.gvk)
- obj, err := c.storage.New(c.gvk)
- if err != nil {
- panic(fmt.Sprintf("Client.New must not return an error: %v", err))
- }
- return obj.(*api.Resource)
-}
-
-// Find returns a single Resource based on the given Filter
-func (c *resourceClient) Find(filter filterer.BaseFilter) (*api.Resource, error) {
- log.Tracef("Client.Find; GVK: %v", c.gvk)
- object, err := c.filterer.Find(c.gvk, filter)
- if err != nil {
- return nil, err
- }
-
- return object.(*api.Resource), nil
-}
-
-// FindAll returns multiple Resources based on the given Filter
-func (c *resourceClient) FindAll(filter filterer.BaseFilter) ([]*api.Resource, error) {
- log.Tracef("Client.FindAll; GVK: %v", c.gvk)
- matches, err := c.filterer.FindAll(c.gvk, filter)
- if err != nil {
- return nil, err
- }
-
- results := make([]*api.Resource, 0, len(matches))
- for _, item := range matches {
- results = append(results, item.(*api.Resource))
- }
-
- return results, nil
-}
-
-// Get returns the Resource matching given UID from the storage
-func (c *resourceClient) Get(uid runtime.UID) (*api.Resource, error) {
- log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk)
- object, err := c.storage.Get(c.gvk, uid)
- if err != nil {
- return nil, err
- }
-
- return object.(*api.Resource), nil
-}
-
-// Set saves the given Resource into the persistent storage
-func (c *resourceClient) Set(resource *api.Resource) error {
- log.Tracef("Client.Set; UID: %q, GVK: %v", resource.GetUID(), c.gvk)
- return c.storage.Set(c.gvk, resource)
-}
-
-// Patch performs a strategic merge patch on the object with
-// the given UID, using the byte-encoded patch given
-func (c *resourceClient) Patch(uid runtime.UID, patch []byte) error {
- return c.storage.Patch(c.gvk, uid, patch)
-}
-
-// Delete deletes the Resource from the storage
-func (c *resourceClient) Delete(uid runtime.UID) error {
- log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk)
- return c.storage.Delete(c.gvk, uid)
-}
-
-// List returns a list of all Resources available
-func (c *resourceClient) List() ([]*api.Resource, error) {
- log.Tracef("Client.List; GVK: %v", c.gvk)
- list, err := c.storage.List(c.gvk)
- if err != nil {
- return nil, err
- }
-
- results := make([]*api.Resource, 0, len(list))
- for _, item := range list {
- results = append(results, item.(*api.Resource))
- }
-
- return results, nil
-}
diff --git a/pkg/content/constructors.go b/pkg/content/constructors.go
new file mode 100644
index 00000000..4b8d032c
--- /dev/null
+++ b/pkg/content/constructors.go
@@ -0,0 +1,142 @@
+package content
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing/iotest"
+
+ "github.com/weaveworks/libgitops/pkg/content/metadata"
+)
+
+// newErrReader makes a Reader implementation that only returns the given error on Read()
+func newErrReader(err error, opts ...metadata.HeaderOption) Reader {
+ return NewReader(iotest.ErrReader(err), opts...)
+}
+
+const (
+ stdinPath = "/dev/stdin"
+ stdoutPath = "/dev/stdout"
+ stderrPath = "/dev/stderr"
+)
+
+func FromStdin(opts ...metadata.HeaderOption) Reader {
+ return FromFile(stdinPath, opts...)
+}
+
+// FromFile returns a Reader for the given file, or a Reader that surfaces
+// the file open error on the first Read.
+func FromFile(filePath string, opts ...metadata.HeaderOption) Reader {
+ // Support stdin
+ if filePath == "-" || filePath == stdinPath {
+ // Mark the source as /dev/stdin
+ opts = append(opts, metadata.WithContentLocation(stdinPath))
+ // TODO: Maybe have a way to override the TracerName through Metadata?
+ return NewReader(os.Stdin, opts...)
+ }
+
+ // Make sure the path is absolute
+ filePath, err := filepath.Abs(filePath)
+ if err != nil {
+ return newErrReader(err, opts...)
+ }
+ // Report the file path in the X-Content-Location header
+ opts = append(opts, metadata.WithContentLocation(filePath))
+
+ // Open the file
+ f, err := os.Open(filePath)
+ if err != nil {
+ return newErrReader(err, opts...)
+ }
+ fi, err := f.Stat()
+ if err != nil {
+ return newErrReader(err, opts...)
+ }
+
+ // Register the Content-Length header
+ opts = append(opts, metadata.WithContentLength(fi.Size()))
+
+ return NewReader(f, opts...)
+}
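+
+// An illustrative usage sketch (not part of this change); assumes a
+// caller-provided ctx, uses ioutil from the standard library, and elides
+// error handling:
+//
+//   r := content.FromFile("config.yaml")
+//   rc := r.WithContext(ctx)
+//   defer rc.Close()
+//   data, _ := ioutil.ReadAll(rc)
+//   loc, _ := r.ContentMetadata().ContentLocation() // the absolute file path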
+
+// FromBytes returns a Reader over the given byte content.
+func FromBytes(content []byte, opts ...metadata.HeaderOption) Reader {
+ // Register the Content-Length
+ opts = append(opts, metadata.WithContentLength(int64(len(content))))
+ // Read from a *bytes.Reader
+ return NewReader(bytes.NewReader(content), opts...)
+}
+
+// FromString returns an io.Reader from the given string content.
+func FromString(content string, opts ...metadata.HeaderOption) Reader {
+ // Register the Content-Length
+ opts = append(opts, metadata.WithContentLength(int64(len(content))))
+ // Read from a *strings.Reader
+ return NewReader(strings.NewReader(content), opts...)
+}
+
+// TODO: Add FromHTTPResponse/ToHTTPResponse constructors that wrap an
+// *http.Response body and carry over its headers.
+
+func ToStdout(opts ...metadata.HeaderOption) Writer {
+ return ToFile(stdoutPath, opts...)
+}
+func ToStderr(opts ...metadata.HeaderOption) Writer {
+ return ToFile(stderrPath, opts...)
+}
+func ToBuffer(buf *bytes.Buffer, opts ...metadata.HeaderOption) Writer {
+ return NewWriter(buf, opts...)
+}
+
+func ToFile(filePath string, opts ...metadata.HeaderOption) Writer {
+ // Shorthands for pipe IO
+ if filePath == "-" || filePath == stdoutPath {
+ // Mark the target as /dev/stdout
+ opts = append(opts, metadata.WithContentLocation(stdoutPath))
+ return NewWriter(os.Stdout, opts...)
+ }
+ if filePath == stderrPath {
+ // Mark the target as /dev/stderr
+ opts = append(opts, metadata.WithContentLocation(stderrPath))
+ return NewWriter(os.Stderr, opts...)
+ }
+
+ // Make sure the path is absolute
+	// TODO: Maybe support "relative" paths as a separate type, instead of modifying the path here?
+ filePath, err := filepath.Abs(filePath)
+ if err != nil {
+ return newErrWriter(err, opts...)
+ }
+ // Report the file path in the X-Content-Location header
+ opts = append(opts, metadata.WithContentLocation(filePath))
+
+ // Make sure all directories are created
+ if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil {
+ return newErrWriter(err, opts...)
+ }
+
+ // Create or truncate the file
+ f, err := os.Create(filePath)
+ if err != nil {
+ return newErrWriter(err, opts...)
+ }
+
+	// Register the Content-Length header. Note that os.Create has just
+	// truncated the file, so this records the current (zero) size, not
+	// the amount of data that will eventually be written.
+ fi, err := f.Stat()
+ if err != nil {
+ return newErrWriter(err, opts...)
+ }
+ opts = append(opts, metadata.WithContentLength(fi.Size()))
+
+ return NewWriter(f, opts...)
+}
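+
+// An illustrative usage sketch (not part of this change); assumes a
+// caller-provided ctx and elides error handling:
+//
+//   w := content.ToFile("out/result.yaml") // parent directories are created
+//   wc := w.WithContext(ctx)
+//   defer wc.Close()
+//   _, _ = wc.Write([]byte("foo: bar\n"))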
+
+func newErrWriter(err error, opts ...metadata.HeaderOption) Writer {
+ return NewWriter(&errWriter{err}, opts...)
+}
+
+type errWriter struct{ err error }
+
+func (w *errWriter) Write([]byte) (int, error) { return 0, w.err }
diff --git a/pkg/content/errors.go b/pkg/content/errors.go
new file mode 100644
index 00000000..164e3c89
--- /dev/null
+++ b/pkg/content/errors.go
@@ -0,0 +1,43 @@
+package content
+
+import (
+ "fmt"
+
+ "github.com/weaveworks/libgitops/pkg/util/structerr"
+)
+
+// Enforce all struct errors implementing structerr.StructError
+var _ structerr.StructError = &UnsupportedContentTypeError{}
+
+// ErrUnsupportedContentType creates a new *UnsupportedContentTypeError
+func ErrUnsupportedContentType(unsupported ContentType, supported ...ContentType) *UnsupportedContentTypeError {
+ return &UnsupportedContentTypeError{Unsupported: unsupported, Supported: supported}
+}
+
+// UnsupportedContentTypeError describes that the supplied content type is not supported by an
+// implementation handling different content types.
+//
+// This error can be checked for equality using errors.Is(err, &UnsupportedContentTypeError{})
+type UnsupportedContentTypeError struct {
+ // Unsupported is the content type that was given but not supported
+ // +required
+ Unsupported ContentType
+ // Supported is optional; if len(Supported) != 0, it lists the content types that indeed
+ // are supported by the implementation. If len(Supported) == 0, it should not be used
+ // as an indicator.
+ // +optional
+ Supported []ContentType
+}
+
+func (e *UnsupportedContentTypeError) Error() string {
+ msg := fmt.Sprintf("unsupported content type: %q", e.Unsupported)
+ if len(e.Supported) != 0 {
+ msg = fmt.Sprintf("%s. supported content types: %v", msg, e.Supported)
+ }
+ return msg
+}
+
+func (e *UnsupportedContentTypeError) Is(target error) bool {
+ _, ok := target.(*UnsupportedContentTypeError)
+ return ok
+}
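+
+// A short sketch of the errors.Is pattern described above; doDecode is a
+// hypothetical operation that may return an *UnsupportedContentTypeError:
+//
+//   if err := doDecode(); errors.Is(err, &UnsupportedContentTypeError{}) {
+//       // fall back to a content type the implementation supports
+//   }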
diff --git a/pkg/content/interfaces.go b/pkg/content/interfaces.go
new file mode 100644
index 00000000..3c85964f
--- /dev/null
+++ b/pkg/content/interfaces.go
@@ -0,0 +1,139 @@
+package content
+
+import (
+ "context"
+ "fmt"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/content/metadata"
+)
+
+var _ fmt.Stringer = ContentType("")
+
+// ContentType describes the kind of content (e.g. JSON or YAML) in a stream.
+type ContentType string
+
+const (
+ ContentTypeYAML ContentType = "application/yaml"
+ ContentTypeJSON ContentType = "application/json"
+)
+
+func (ct ContentType) ContentType() ContentType { return ct }
+func (ct ContentType) String() string { return string(ct) }
+
+type ContentTypes []ContentType
+
+func (cts ContentTypes) Has(want ContentType) bool {
+ for _, ct := range cts {
+ if ct == want {
+ return true
+ }
+ }
+ return false
+}
+
+func WithContentType(ct ContentType) metadata.HeaderOption {
+ return metadata.SetOption(metadata.ContentTypeKey, ct.String())
+}
+
+type ContentTyped interface {
+ ContentType() ContentType
+}
+
+type ContentTypeSupporter interface {
+	// SupportedContentTypes returns the supported ContentTypes; the order may express priority.
+ SupportedContentTypes() ContentTypes
+}
+
+// WrapReaderFunc wraps the underlying stream of a Reader.
+// If the returned io.Reader does not implement io.Closer,
+// the underlying Close() method is re-used.
+type WrapReaderFunc func(underlying io.ReadCloser) io.Reader
+
+type WrapWriterFunc func(underlying io.WriteCloser) io.Writer
+
+type WrapReaderToSegmentFunc func(underlying io.ReadCloser) RawSegmentReader
+
+// Reader is a tracing-capable and metadata-bound io.Reader and io.Closer
+// wrapper. It is NOT thread-safe by default. It supports introspection
+// of composite ReadClosers.
+//
+// The Reader reads the current span from the given context, and uses that
+// span's TracerProvider to create a Tracer and then a new Span for the
+// current operation.
+type Reader interface {
+	// WithContext binds the returned io.ReadCloser to the given context:
+	// the underlying set/clearContext functions run before and after each
+	// Read and Close. If the underlying Reader doesn't support closing,
+	// the returned Close method only logs a "CloseNoop" trace and returns nil.
+ WithContext(ctx context.Context) io.ReadCloser
+
+ // This reader supports registering metadata about the content it
+ // is reading.
+ MetadataContainer
+
+ // Wrap returns a new Reader with io.ReadCloser B that reads from
+ // the current Reader's underlying io.ReadCloser A. If the returned
+ // B is an io.ReadCloser or this Reader's HasCloser() is true,
+ // HasCloser() of the returned Reader will be true, otherwise false.
+ Wrap(fn WrapReaderFunc) Reader
+ WrapSegment(fn WrapReaderToSegmentFunc) SegmentReader
+}
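+
+// A minimal sketch of Wrap (illustrative only): wrapping the underlying
+// stream with io.LimitReader. io.LimitReader returns no io.Closer, so the
+// original Reader's Close is re-used for the returned Reader:
+//
+//   limited := r.Wrap(func(underlying io.ReadCloser) io.Reader {
+//       return io.LimitReader(underlying, 1024)
+//   })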
+
+type RawSegmentReader interface {
+ Read() ([]byte, error)
+}
+
+type ClosableRawSegmentReader interface {
+ RawSegmentReader
+ io.Closer
+}
+
+type SegmentReader interface {
+ WithContext(ctx context.Context) ClosableRawSegmentReader
+
+ MetadataContainer
+}
+
+// In the future, one can implement a WrapSegment function that is of
+// the following form:
+// WrapSegment(name string, fn WrapSegmentFunc) SegmentReader
+// where WrapSegmentFunc is func(underlying ClosableRawSegmentReader) RawSegmentReader
+// This allows chaining simple composite SegmentReaders
+
+type Writer interface {
+ WithContext(ctx context.Context) io.WriteCloser
+
+ // This writer supports registering metadata about the content it
+ // is writing and the destination it is writing to.
+ MetadataContainer
+
+ Wrap(fn WrapWriterFunc) Writer
+}
+
+type readerInternal interface {
+ Reader
+ RawReader() io.Reader
+ RawCloser() io.Closer
+}
+
+type segmentReaderInternal interface {
+ SegmentReader
+ RawSegmentReader() RawSegmentReader
+ RawCloser() io.Closer
+}
+
+type writerInternal interface {
+ Writer
+ RawWriter() io.Writer
+ RawCloser() io.Closer
+}
+
+// The internal implementation structs should implement the
+// ...Internal interfaces, in order to expose their raw, underlying resources
+// just in case it is _really_ needed upstream (e.g. for testing). It is not
+// exposed by default in the interface to avoid showing up in Godoc, as it
+// most often shouldn't be used.
+var _ readerInternal = &reader{}
+var _ segmentReaderInternal = &segmentReader{}
+var _ writerInternal = &writer{}
diff --git a/pkg/content/metadata.go b/pkg/content/metadata.go
new file mode 100644
index 00000000..a17f3f16
--- /dev/null
+++ b/pkg/content/metadata.go
@@ -0,0 +1,101 @@
+package content
+
+import (
+ "encoding/json"
+ "net/textproto"
+ "net/url"
+
+ "github.com/weaveworks/libgitops/pkg/content/metadata"
+)
+
+// Metadata is the interface that's common to contentMetadata and a wrapper
+// around an HTTP request.
+type Metadata interface {
+ metadata.Header
+ metadata.HeaderOption
+
+ // Apply applies the given Options to itself and returns itself, without
+ // any deep-copying.
+ Apply(opts ...metadata.HeaderOption) Metadata
+ // ContentLength retrieves the standard "Content-Length" header
+ ContentLength() (int64, bool)
+ // ContentType retrieves the standard "Content-Type" header
+ ContentType() (ContentType, bool)
+ // ContentLocation retrieves the custom "X-Content-Location" header
+ ContentLocation() (*url.URL, bool)
+
+ // Clone makes a deep copy of the Metadata
+ // TODO: Do we need this anymore?
+ Clone() Metadata
+
+ ToContainer() MetadataContainer
+}
+
+var _ Metadata = contentMetadata{}
+
+var _ json.Marshaler = contentMetadata{}
+
+func (m contentMetadata) MarshalJSON() ([]byte, error) {
+ return json.Marshal(m.MIMEHeader)
+}
+
+func (m contentMetadata) ApplyToHeader(target metadata.Header) {
+ for k, vals := range m.MIMEHeader {
+ for i, val := range vals {
+ if i == 0 {
+ target.Set(k, val)
+ } else {
+ target.Add(k, val)
+ }
+ }
+ }
+}
+
+func (m contentMetadata) Apply(opts ...metadata.HeaderOption) Metadata {
+ for _, opt := range opts {
+ opt.ApplyToHeader(m)
+ }
+ return m
+}
+
+func (m contentMetadata) ContentLength() (int64, bool) {
+ return metadata.GetInt64(m, metadata.ContentLengthKey)
+}
+
+func (m contentMetadata) ContentType() (ContentType, bool) {
+ ct, ok := metadata.GetString(m, metadata.ContentTypeKey)
+ return ContentType(ct), ok
+}
+
+func (m contentMetadata) ContentLocation() (*url.URL, bool) {
+ return metadata.GetURL(m, metadata.XContentLocationKey)
+}
+
+func (m contentMetadata) ToContainer() MetadataContainer {
+ return &metadataContainer{m}
+}
+
+func (m contentMetadata) Clone() Metadata {
+ m2 := make(textproto.MIMEHeader, len(m.MIMEHeader))
+ for k, v := range m.MIMEHeader {
+ m2[k] = v
+ }
+ return contentMetadata{m2}
+}
+
+// MetadataContainer is implemented by types that carry content Metadata.
+type MetadataContainer interface {
+	// ContentMetadata returns the Metadata associated with the content.
+	ContentMetadata() Metadata
+}
+
+func NewMetadata(opts ...metadata.HeaderOption) Metadata {
+ return contentMetadata{MIMEHeader: textproto.MIMEHeader{}}.Apply(opts...)
+}
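+
+// A small sketch of constructing Metadata with options (illustrative only):
+//
+//   m := NewMetadata(
+//       WithContentType(ContentTypeYAML),
+//       metadata.WithContentLength(42),
+//   )
+//   ct, ok := m.ContentType() // ContentTypeYAML, true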
+
+type contentMetadata struct {
+ textproto.MIMEHeader
+}
+
+type metadataContainer struct{ m Metadata }
+
+func (b *metadataContainer) ContentMetadata() Metadata { return b.m }
diff --git a/pkg/content/metadata/metadata.go b/pkg/content/metadata/metadata.go
new file mode 100644
index 00000000..062a2b7b
--- /dev/null
+++ b/pkg/content/metadata/metadata.go
@@ -0,0 +1,157 @@
+package metadata
+
+import (
+ "mime"
+ "net/textproto"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+/*
+ Metadata origin:
+
+ content.FromFile -> content.Reader
+ - X-Content-Location
+ - Content-Length
+
+ content.FromBytes -> content.Reader
+ - Content-Length
+
+ content.FromString -> content.Reader
+ - Content-Length
+
+ content.ToFile -> content.Writer
+ - X-Content-Location
+
+	content.ToBuffer -> content.Writer
+
+ frame.newYAMLReader -> frame.Reader
+ - Content-Type => YAML
+
+ frame.newJSONReader -> frame.Reader
+ - Content-Type => JSON
+
+ frame.newRecognizingReader -> frame.Reader
+	- If Content-Type is set, try to use FramingType == ContentType
+	- If X-Content-Location is set, try to deduce the ContentType from it
+ - Peek the buffer, and check if JSON
+
+*/
+
+const (
+ XContentLocationKey = "X-Content-Location"
+ //XFramingTypeKey = "X-Framing-Type"
+
+ ContentLengthKey = "Content-Length"
+ ContentTypeKey = "Content-Type"
+ AcceptKey = "Accept"
+)
+
+// HeaderOption applies a mutation (e.g. setting or adding values) to a Header.
+type HeaderOption interface {
+	// TODO: Rename this method to ApplyMetadataHeader?
+	ApplyToHeader(target Header)
+}
+
+var _ HeaderOption = setHeaderOption{}
+
+func SetOption(k, v string) HeaderOption {
+ return setHeaderOption{Key: k, Value: v}
+}
+
+// WithContentLength sets the standard Content-Length header.
+func WithContentLength(length int64) HeaderOption {
+	return SetOption(ContentLengthKey, strconv.FormatInt(length, 10))
+}
+
+func WithContentLocation(loc string) HeaderOption {
+ return SetOption(XContentLocationKey, loc)
+}
+
+func WithAccept(accepts ...string) HeaderOption {
+ return addHeaderOption{Key: AcceptKey, Values: accepts}
+}
+
+type setHeaderOption struct{ Key, Value string }
+
+func (o setHeaderOption) ApplyToHeader(target Header) {
+ target.Set(o.Key, o.Value)
+}
+
+type addHeaderOption struct {
+ Key string
+ Values []string
+}
+
+func (o addHeaderOption) ApplyToHeader(target Header) {
+ for _, val := range o.Values {
+ target.Add(o.Key, val)
+ }
+}
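+
+// A minimal sketch of how these options compose against any Header
+// implementation (illustrative only):
+//
+//   h := textproto.MIMEHeader{}
+//   WithContentLocation("/tmp/foo.yaml").ApplyToHeader(h)
+//   WithAccept("application/json", "application/yaml").ApplyToHeader(h)
+//   // h now carries X-Content-Location and two Accept values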
+
+// Make sure the interface is compatible with the targeted textproto.MIMEHeader
+var _ Header = textproto.MIMEHeader{}
+
+// Header expresses the string-to-strings map interface of net/textproto's MIMEHeader.
+type Header interface {
+ Add(key, value string)
+ Set(key, value string)
+ Get(key string) string
+ Values(key string) []string
+ Del(key string)
+}
+
+// TODO: Public or private?
+
+func GetString(m Header, key string) (string, bool) {
+ if len(m.Values(key)) == 0 {
+ return "", false
+ }
+ return m.Get(key), true
+}
+
+func GetInt64(m Header, key string) (int64, bool) {
+ i, err := strconv.ParseInt(m.Get(key), 10, 64)
+ if err != nil {
+ return 0, false
+ }
+ return i, true
+}
+
+func GetURL(m Header, key string) (*url.URL, bool) {
+ str, ok := GetString(m, key)
+ if !ok {
+ return nil, false
+ }
+ u, err := url.Parse(str)
+ if err != nil {
+ return nil, false
+ }
+ return u, true
+}
+
+func GetMediaTypes(m Header, key string) (mediaTypes []string, err error) {
+ for _, commaSepVal := range m.Values(key) {
+ for _, mediaTypeStr := range strings.Split(commaSepVal, ",") {
+ mediaType, _, err := mime.ParseMediaType(mediaTypeStr)
+ if err != nil {
+ return nil, err
+ }
+ mediaTypes = append(mediaTypes, mediaType)
+ }
+ }
+ return
+}
+
+/*
+ Content-Encoding
+ Content-Length
+ Content-Type
+ Last-Modified
+ ETag
+*/
diff --git a/pkg/content/metadata/metadata_test.go b/pkg/content/metadata/metadata_test.go
new file mode 100644
index 00000000..8d2a77f1
--- /dev/null
+++ b/pkg/content/metadata/metadata_test.go
@@ -0,0 +1,125 @@
+package metadata
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "mime"
+ "net/textproto"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+ "sigs.k8s.io/kustomize/kyaml/kio"
+ "sigs.k8s.io/yaml"
+)
+
+// TestMIME is an exploratory test: it logs how mime.ParseMediaType handles
+// the parts of a comma-separated Accept header, instead of failing the suite.
+func TestMIME(t *testing.T) {
+	for _, part := range strings.Split("text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8", ",") {
+		t.Log(mime.ParseMediaType(part))
+	}
+}
+
+// TestTypePrint is an exploratory test that logs the concrete types involved,
+// instead of failing the suite.
+func TestTypePrint(t *testing.T) {
+	t.Log(fmt.Sprintf("%T", bytes.NewBuffer(nil)))
+	t.Log(fmt.Sprintf("%T", json.Framer.NewFrameReader(nil)))
+}
+
+// TestK8sYAML is an exploratory test: it logs how kio splits a multi-document
+// YAML stream that contains empty documents, instead of failing the suite.
+func TestK8sYAML(t *testing.T) {
+	c := []byte("\n---\n\n---\n f : fo\n\n---\n \n---\nbar: true")
+
+	rn, err := kio.FromBytes(c)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for _, n := range rn {
+		t.Log(n.MustString())
+	}
+}
+
+// TestBufio is an exploratory test: it logs that bufio.Reader.Peek returns
+// the available bytes together with io.EOF when the stream is shorter than
+// the requested peek size.
+func TestBufio(t *testing.T) {
+	r := strings.NewReader("foo: bar")
+	br := bufio.NewReaderSize(r, 2048)
+	c, err := br.Peek(2048)
+	t.Log(string(c), err)
+}
+
+const fooYAML = `
+
+---
+
+---
+baz: 123
+foo: bar
+bar: true
+---
+foo: bar
+bar: true
+
+`
+
+// TestFoo is an exploratory test: it logs how sigs.k8s.io/yaml's
+// UnmarshalStrict handles a multi-document stream, instead of failing the suite.
+func TestFoo(t *testing.T) {
+	obj := map[string]interface{}{}
+	err := yaml.UnmarshalStrict([]byte(fooYAML), &obj)
+	t.Logf("%+v %v", obj, err)
+}
+
+func TestGetMediaTypes(t *testing.T) {
+ tests := []struct {
+ name string
+ opts []HeaderOption
+ key string
+ wantMediaTypes []string
+ wantErr error
+ }{
+ {
+ name: "multiple keys, and values in one key",
+ opts: []HeaderOption{
+ WithAccept("application/yaml", "application/xml"),
+ WithAccept("application/json"),
+ WithAccept("text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8"),
+ },
+ key: AcceptKey,
+ wantMediaTypes: []string{
+ "application/yaml",
+ "application/xml",
+ "application/json",
+ "text/html",
+ "application/xhtml+xml",
+ "application/xml",
+ "image/webp",
+ "*/*",
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ h := textproto.MIMEHeader{}
+ for _, opt := range tt.opts {
+ opt.ApplyToHeader(h)
+ }
+ gotMediaTypes, err := GetMediaTypes(h, tt.key)
+ assert.Equal(t, tt.wantMediaTypes, gotMediaTypes)
+ assert.ErrorIs(t, err, tt.wantErr)
+ })
+ }
+}
diff --git a/pkg/content/reader.go b/pkg/content/reader.go
new file mode 100644
index 00000000..e417096b
--- /dev/null
+++ b/pkg/content/reader.go
@@ -0,0 +1,244 @@
+package content
+
+import (
+ "context"
+ "errors"
+ "io"
+ "os"
+
+ "github.com/weaveworks/libgitops/pkg/content/metadata"
+ "github.com/weaveworks/libgitops/pkg/tracing"
+ "github.com/weaveworks/libgitops/pkg/util/compositeio"
+ "github.com/weaveworks/libgitops/pkg/util/limitedio"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type contextLock interface {
+ setContext(ctx context.Context)
+ clearContext()
+}
+
+type contextLockImpl struct {
+ ctx context.Context
+}
+
+func (l *contextLockImpl) setContext(ctx context.Context) { l.ctx = ctx }
+func (l *contextLockImpl) clearContext() { l.ctx = nil }
+
+type readContextLockImpl struct {
+ contextLockImpl
+ r io.Reader
+ metaGetter MetadataContainer
+ underlyingLock contextLock
+}
+
+func (r *readContextLockImpl) Read(p []byte) (n int, err error) {
+ ft := tracing.FromContext(r.ctx, r.r)
+ err = ft.TraceFunc(r.ctx, "Read", func(ctx context.Context, span trace.Span) error {
+ var tmperr error
+ if r.underlyingLock != nil {
+ r.underlyingLock.setContext(ctx)
+ }
+ n, tmperr = r.r.Read(p)
+ if r.underlyingLock != nil {
+ r.underlyingLock.clearContext()
+ }
+ // Register metadata in the span
+ span.SetAttributes(SpanAttrByteContentCap(p[:n], len(p))...)
+ return tmperr
+ }, trace.WithAttributes(SpanAttrContentMetadata(r.metaGetter.ContentMetadata()))).RegisterCustom(SpanRegisterReadError)
+ return
+}
+
+type closeContextLockImpl struct {
+ contextLockImpl
+ c io.Closer
+ metaGetter MetadataContainer
+ underlyingLock contextLock
+}
+
+func (c *closeContextLockImpl) Close() error {
+ spanName := "Close"
+ if c.c == nil {
+ spanName = "CloseNoop"
+ }
+
+ ft := tracing.FromContext(c.ctx, c.c)
+ return ft.TraceFunc(c.ctx, spanName, func(ctx context.Context, _ trace.Span) error {
+ // Don't close if c.c is nil
+ if c.c == nil {
+ return nil
+ }
+
+ if c.underlyingLock != nil {
+ c.underlyingLock.setContext(ctx)
+ }
+ // Close the underlying resource
+ err := c.c.Close()
+ if c.underlyingLock != nil {
+ c.underlyingLock.clearContext()
+ }
+ return err
+ }, trace.WithAttributes(SpanAttrContentMetadata(c.metaGetter.ContentMetadata()))).Register()
+}
+
+type reader struct {
+ MetadataContainer
+ read *readContextLockImpl
+ close *closeContextLockImpl
+}
+
+type readerWithContext struct {
+ read *readContextLockImpl
+ ctx context.Context
+}
+
+func (r *readerWithContext) Read(p []byte) (n int, err error) {
+ r.read.setContext(r.ctx)
+ n, err = r.read.Read(p)
+ r.read.clearContext()
+ return
+}
+
+type closerWithContext struct {
+ close *closeContextLockImpl
+ ctx context.Context
+}
+
+func (r *closerWithContext) Close() error {
+ r.close.setContext(r.ctx)
+ err := r.close.Close()
+ r.close.clearContext()
+ return err
+}
+
+func (r *reader) WithContext(ctx context.Context) io.ReadCloser {
+ return compositeio.ReadCloser(&readerWithContext{r.read, ctx}, &closerWithContext{r.close, ctx})
+}
+func (r *reader) RawReader() io.Reader { return r.read.r }
+func (r *reader) RawCloser() io.Closer { return r.close.c }
+
+// TODO: Maybe allow adding extra attributes at the end?
+func (r *reader) Wrap(wrapFn WrapReaderFunc) Reader {
+ newReader := wrapFn(compositeio.ReadCloser(r.read, r.close))
+ if newReader == nil {
+ panic("newReader must not be nil")
+ }
+ // If an io.Closer is not returned, close this
+ // Reader's stream instead. Importantly enough,
+ // a trace will be registered for both this
+ // Reader, and the returned one.
+ newCloser, ok := newReader.(io.Closer)
+ if !ok {
+ newCloser = r.close
+ }
+
+ mb := r.ContentMetadata().Clone().ToContainer()
+
+ return &reader{
+ MetadataContainer: mb,
+ read: &readContextLockImpl{
+ r: newReader,
+ metaGetter: mb,
+ underlyingLock: r.read,
+ },
+ close: &closeContextLockImpl{
+ c: newCloser,
+ metaGetter: mb,
+ underlyingLock: r.close,
+ },
+ }
+}
+
+func (r *reader) WrapSegment(wrapFn WrapReaderToSegmentFunc) SegmentReader {
+ newSegmentReader := wrapFn(compositeio.ReadCloser(r.read, r.close))
+ if newSegmentReader == nil {
+ panic("newSegmentReader must not be nil")
+ }
+
+ // If an io.Closer is not returned, close this
+ // Reader's stream instead. Importantly enough,
+ // a trace will be registered for both this
+ // Reader, and the returned one.
+ newCloser, ok := newSegmentReader.(io.Closer)
+ if !ok {
+ newCloser = r.close
+ }
+
+ mb := r.ContentMetadata().Clone().ToContainer()
+
+ return &segmentReader{
+ MetadataContainer: mb,
+ read: &readSegmentContextLockImpl{
+ r: newSegmentReader,
+ metaGetter: mb,
+ underlyingLock: r.read,
+ },
+ close: &closeContextLockImpl{
+ c: newCloser,
+ metaGetter: mb,
+ underlyingLock: r.close,
+ },
+ }
+}
+
+func NewReader(r io.Reader, opts ...metadata.HeaderOption) Reader {
+	// If it already is a Reader, return it as-is. Note that any opts given are ignored in this case.
+ rr, ok := r.(Reader)
+ if ok {
+ return rr
+ }
+
+ // Use the closer if available
+ c, _ := r.(io.Closer)
+ // Never close stdio
+ if isStdio(r) {
+ c = nil
+ }
+ mb := NewMetadata(opts...).ToContainer()
+
+ return &reader{
+ MetadataContainer: mb,
+ read: &readContextLockImpl{
+ r: r,
+ metaGetter: mb,
+ // underlyingLock is nil
+ },
+ close: &closeContextLockImpl{
+ c: c,
+ metaGetter: mb,
+ // underlyingLock is nil
+ },
+ }
+}
+
+func isStdio(s interface{}) bool {
+ f, ok := s.(*os.File)
+ if !ok {
+ return false
+ }
+	// File descriptors 0, 1 and 2 are stdin, stdout and stderr, respectively
+	return int(f.Fd()) < 3
+}
+
+// SpanRegisterReadError registers io.EOF as an "EOF" event on the span, and records any other non-nil error on it
+func SpanRegisterReadError(span trace.Span, err error) {
+ // Register the error with the span. EOF is expected at some point,
+ // hence, register that as an event instead of an error
+ if errors.Is(err, io.EOF) {
+ span.AddEvent("EOF")
+ } else if err != nil {
+ span.RecordError(err)
+ }
+}
+
+// ResetCounterFunc resets the byte counter of a limited Reader
+type ResetCounterFunc func()
+
+// WrapLimited wraps the Reader with a limitedio.Reader that enforces
+// maxFrameSize, and returns a function that resets the read counter
+// (e.g. between frames).
+func WrapLimited(r Reader, maxFrameSize limitedio.Limit) (Reader, ResetCounterFunc) {
+ var reset ResetCounterFunc
+ limitedR := r.Wrap(func(underlying io.ReadCloser) io.Reader {
+ lr := limitedio.NewReader(underlying, maxFrameSize)
+ reset = lr.ResetCounter
+ return lr
+ })
+ return limitedR, reset
+}
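+
+// A short sketch of WrapLimited in use (illustrative only; the frame loop
+// and maxFrameSize value are hypothetical):
+//
+//   limited, reset := WrapLimited(r, maxFrameSize)
+//   rc := limited.WithContext(ctx)
+//   // ... read one frame from rc ...
+//   reset() // allow the next frame to again read up to maxFrameSize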
diff --git a/pkg/content/reader_test.go b/pkg/content/reader_test.go
new file mode 100644
index 00000000..98b6aea3
--- /dev/null
+++ b/pkg/content/reader_test.go
@@ -0,0 +1,62 @@
+package content
+
+import (
+ "bytes"
+ "os"
+ "path/filepath"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_isStdio(t *testing.T) {
+ tmp := t.TempDir()
+ f, err := os.Create(filepath.Join(tmp, "foo.txt"))
+ require.Nil(t, err)
+ defer f.Close()
+ tests := []struct {
+ name string
+ in interface{}
+ want bool
+ }{
+ {
+ name: "os.Stdin",
+ in: os.Stdin,
+ want: true,
+ },
+ {
+ name: "os.Stdout",
+ in: os.Stdout,
+ want: true,
+ },
+ {
+ name: "os.Stderr",
+ in: os.Stderr,
+ want: true,
+ },
+ {
+ name: "*bytes.Buffer",
+ in: bytes.NewBufferString("FooBar"),
+ },
+ {
+ name: "*strings.Reader",
+ in: strings.NewReader("FooBar"),
+ },
+ {
+ name: "*os.File",
+ in: f,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got := isStdio(tt.in)
+			assert.Equal(t, tt.want, got)
+ })
+ }
+}
diff --git a/pkg/content/recognizing.go b/pkg/content/recognizing.go
new file mode 100644
index 00000000..ed3d198b
--- /dev/null
+++ b/pkg/content/recognizing.go
@@ -0,0 +1,203 @@
+package content
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "errors"
+ "io"
+ "path/filepath"
+
+ "github.com/weaveworks/libgitops/pkg/content/metadata"
+ "github.com/weaveworks/libgitops/pkg/tracing"
+ "github.com/weaveworks/libgitops/pkg/util/compositeio"
+ "go.opentelemetry.io/otel/trace"
+ yamlutil "k8s.io/apimachinery/pkg/util/yaml"
+ "sigs.k8s.io/yaml"
+)
+
+const peekSize = 2048
+
+type ContentTypeRecognizer interface {
+ FromContentMetadata(m Metadata) (ct ContentType, ok bool)
+ FromPeekBytes(peek []byte) (ct ContentType, ok bool)
+
+	// SupportedContentTypes reports which ContentTypes this recognizer supports.
+ ContentTypeSupporter
+}
+
+func NewJSONYAMLRecognizingReader(ctx context.Context, r Reader) (Reader, ContentType, error) {
+ return NewRecognizingReader(ctx, r, NewJSONYAMLContentTypeRecognizer())
+}
+
+func NewRecognizingReader(ctx context.Context, r Reader, ctrec ContentTypeRecognizer) (Reader, ContentType, error) {
+ // If r already has Content-Type set, all good
+ meta := r.ContentMetadata()
+ ct, ok := meta.ContentType()
+ if ok {
+ return r, ct, nil
+ }
+
+ // Try to resolve the Content-Type from the X-Content-Location header
+ ct, ok = ctrec.FromContentMetadata(meta)
+ if ok {
+ meta.Apply(WithContentType(ct))
+ return r, ct, nil
+ }
+
+ var newr Reader
+ err := tracing.FromContext(ctx, "content").TraceFunc(ctx, "NewRecognizingReader",
+ func(ctx context.Context, span trace.Span) error {
+
+ // Use the context to access the io.ReadCloser
+ rc := r.WithContext(ctx)
+ meta := r.ContentMetadata().Clone()
+
+ bufr := bufio.NewReaderSize(rc, peekSize)
+
+ peek, err := bufr.Peek(peekSize)
+ if err != nil && !errors.Is(err, io.EOF) {
+ return err
+ }
+
+			// Write to ct defined earlier; that value will be returned if err == nil
+ ct, ok = ctrec.FromPeekBytes(peek)
+ if !ok {
+ // TODO: Struct error; include the peek in the context too
+ return errors.New("couldn't recognize content type")
+ }
+
+ // Set the right recognized content type
+ meta.Apply(WithContentType(ct))
+
+ // Read from the buffered bufio.Reader, because we have already peeked
+ // data from the underlying rc. Close rc when done.
+ newr = NewReader(compositeio.ReadCloser(bufr, rc), meta)
+ return nil
+ }).Register()
+ if err != nil {
+ return nil, "", err
+ }
+
+ return newr, ct, nil
+}
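+
+// A minimal sketch of content-type recognition on read (illustrative only;
+// error handling elided). "manifest" has no file extension, so the content
+// type is recognized by peeking at the stream:
+//
+//   r := FromFile("manifest")
+//   r, ct, err := NewJSONYAMLRecognizingReader(ctx, r)
+//   // ct is now ContentTypeJSON or ContentTypeYAML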
+
+func NewRecognizingWriter(w Writer, ctrec ContentTypeRecognizer) (Writer, ContentType, error) {
+ // If r already has Content-Type set, all good
+ meta := w.ContentMetadata()
+ ct, ok := meta.ContentType()
+ if ok {
+ return w, ct, nil
+ }
+
+ // Try to resolve the Content-Type from the X-Content-Location header
+ ct, ok = ctrec.FromContentMetadata(meta)
+ if ok {
+ meta.Apply(WithContentType(ct))
+ return w, ct, nil
+ }
+
+ // Negotiate the Accept header
+ ct, ok = negotiateAccept(meta, ctrec.SupportedContentTypes())
+ if ok {
+ meta.Apply(WithContentType(ct))
+ return w, ct, nil
+ }
+
+ return nil, "", errors.New("couldn't recognize content type")
+}
+
+const acceptAll ContentType = "*/*"
+
+func negotiateAccept(meta Metadata, supportedTypes []ContentType) (ContentType, bool) {
+ accepts, err := metadata.GetMediaTypes(meta, metadata.AcceptKey)
+ if err != nil {
+ return "", false
+ }
+
+	// Prioritize the order that the metadata (Accept header) asks for; supportedTypes is also in priority order
+ for _, accept := range accepts {
+ for _, supported := range supportedTypes {
+ if matchesAccept(ContentType(accept), supported) {
+ return supported, true
+ }
+ }
+ }
+ return "", false
+}
+
+func matchesAccept(accept, supported ContentType) bool {
+ if accept == acceptAll {
+ return true
+ }
+ return accept == supported
+}
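+
+// A small sketch of negotiateAccept (illustrative only):
+//
+//   m := NewMetadata(metadata.WithAccept("application/bar", "application/json"))
+//   ct, ok := negotiateAccept(m, ContentTypes{ContentTypeYAML, ContentTypeJSON})
+//   // ct == ContentTypeJSON, ok == true: the first accepted type that is
+//   // also supported wins, honoring the client's priority order.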
+
+func NewJSONYAMLContentTypeRecognizer() ContentTypeRecognizer {
+ return jsonYAMLContentTypeRecognizer{}
+}
+
+type jsonYAMLContentTypeRecognizer struct{}
+
+var defaultExtMap = map[string]ContentType{
+ ".json": ContentTypeJSON,
+ ".yml": ContentTypeYAML,
+ ".yaml": ContentTypeYAML,
+}
+
+func (jsonYAMLContentTypeRecognizer) FromContentMetadata(m Metadata) (ContentType, bool) {
+ loc, ok := metadata.GetString(m, metadata.XContentLocationKey)
+ if !ok {
+ return "", false
+ }
+ ext := filepath.Ext(loc)
+ ct, ok := defaultExtMap[ext]
+ if !ok {
+ return "", false
+ }
+ return ct, true
+}
+
+func (jsonYAMLContentTypeRecognizer) FromPeekBytes(peek []byte) (ContentType, bool) {
+ // Check if this is JSON or YAML
+ if yamlutil.IsJSONBuffer(peek) {
+ return ContentTypeJSON, true
+ } else if isYAML(peek) {
+ return ContentTypeYAML, true
+ }
+ return "", false
+}
+
+func (jsonYAMLContentTypeRecognizer) SupportedContentTypes() ContentTypes {
+ return []ContentType{ContentTypeJSON, ContentTypeYAML}
+}
+
+func isYAML(peek []byte) bool {
+ line, err := getLine(peek)
+ if err != nil {
+ return false
+ }
+
+ o := map[string]interface{}{}
+ err = yaml.Unmarshal(line, &o)
+ return err == nil
+}
+
+func getLine(peek []byte) ([]byte, error) {
+ s := bufio.NewScanner(bytes.NewReader(peek))
+ // TODO: Support very long lines? (over 65k bytes?) Probably not
+ for s.Scan() {
+ t := bytes.TrimSpace(s.Bytes())
+ // TODO: Ignore comments
+ if len(t) == 0 || bytes.Equal(t, []byte("---")) {
+ continue
+ }
+ return t, nil
+ }
+ // Return a possible scanning error
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+ return nil, errors.New("couldn't find non-empty line in scanner")
+}
diff --git a/pkg/content/recognizing_reader_test.go b/pkg/content/recognizing_reader_test.go
new file mode 100644
index 00000000..804f237a
--- /dev/null
+++ b/pkg/content/recognizing_reader_test.go
@@ -0,0 +1,96 @@
+package content
+
+import (
+ "bufio"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func Test_isYAML(t *testing.T) {
+ tests := []struct {
+ name string
+ peek string
+ want bool
+ }{
+ {
+ name: "field mapping",
+ peek: "foo: bar\n",
+ want: true,
+ },
+ {
+ name: "spaces and other empty documents",
+ peek: `---
+
+
+---
+---
+foo: bar`,
+ want: true,
+ },
+ {
+ name: "bool",
+ peek: "foo: true",
+ want: true,
+ },
+ {
+ name: "int",
+ peek: "foo: 5",
+ want: true,
+ },
+ {
+ name: "float",
+ peek: "foo: 5.1",
+ want: true,
+ },
+ {
+			name: "null value",
+ peek: "foo: null",
+ want: true,
+ },
+ {
+ name: "beginning of struct",
+ peek: "foo:",
+ want: true,
+ },
+ {
+ name: "scalar null",
+ peek: `null`,
+ want: true,
+ },
+ {
+ name: "nothing",
+ },
+ {
+ name: "line overflow",
+ peek: strings.Repeat("a", bufio.MaxScanTokenSize) + ": true",
+ },
+
+ {
+ name: "list element struct",
+ peek: "- foo: bar",
+ },
+ {
+ name: "list element string",
+ peek: "- foo",
+ },
+ {
+ name: "scalar string",
+ peek: `foo`,
+ },
+ {
+ name: "scalar int",
+ peek: `5`,
+ },
+ {
+ name: "scalar float",
+ peek: `5.1`,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+			assert.Equal(t, tt.want, isYAML([]byte(tt.peek)))
+ })
+ }
+}
diff --git a/pkg/content/recognizing_test.go b/pkg/content/recognizing_test.go
new file mode 100644
index 00000000..0350a6c7
--- /dev/null
+++ b/pkg/content/recognizing_test.go
@@ -0,0 +1,69 @@
+package content
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/weaveworks/libgitops/pkg/content/metadata"
+)
+
+func Test_negotiateAccept(t *testing.T) {
+ tests := []struct {
+ name string
+ accepts []string
+ supported []ContentType
+ want ContentType
+ wantOk bool
+ }{
+ {
+ name: "accepts has higher priority than supported",
+			// application/bar is not supported, but the second-highest priority (application/json) is
+ accepts: []string{"application/bar", "application/json", "application/yaml"},
+ supported: []ContentType{"application/foo", "application/yaml", "application/json"},
+ want: "application/json",
+ wantOk: true,
+ },
+ {
+ name: "no accepts should give empty result",
+ supported: []ContentType{"application/foo", "application/yaml", "application/json"},
+ },
+ {
+ name: "no supported should give empty result",
+ accepts: []string{"application/bar", "application/json", "application/yaml"},
+ },
+ {
+ name: "invalid accept should give empty result",
+ accepts: []string{"///;;app/bar", "application/json", "application/yaml"},
+ supported: []ContentType{"application/foo", "application/yaml", "application/json"},
+ },
+ {
+ name: "ignore extra parameters, e.g. q=0.8",
+ accepts: []string{"application/bar", "application/json;q=0.8", "application/yaml"},
+ supported: []ContentType{"application/foo", "application/yaml", "application/json"},
+ want: "application/json",
+ wantOk: true,
+ },
+ {
+ name: "allow comma separation",
+ accepts: []string{"application/bar, application/json;q=0.8", "application/yaml"},
+ supported: []ContentType{"application/foo", "application/yaml", "application/json"},
+ want: "application/json",
+ wantOk: true,
+ },
+ {
+ name: "accept all; choose the preferred one",
+ accepts: []string{"application/bar, */*;q=0.7", "application/yaml"},
+ supported: []ContentType{"application/foo", "application/yaml", "application/json"},
+ want: "application/foo",
+ wantOk: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ m := NewMetadata(metadata.WithAccept(tt.accepts...))
+ got, gotOk := negotiateAccept(m, tt.supported)
+ assert.Equal(t, tt.want, got)
+ assert.Equal(t, tt.wantOk, gotOk)
+ })
+ }
+}
diff --git a/pkg/content/segment_reader.go b/pkg/content/segment_reader.go
new file mode 100644
index 00000000..62f408ce
--- /dev/null
+++ b/pkg/content/segment_reader.go
@@ -0,0 +1,63 @@
+package content
+
+import (
+ "context"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/tracing"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type segmentReader struct {
+ MetadataContainer
+ read *readSegmentContextLockImpl
+ close *closeContextLockImpl
+}
+
+func (r *segmentReader) WithContext(ctx context.Context) ClosableRawSegmentReader {
+ return closableRawSegmentReader{&segmentReaderWithContext{r.read, ctx}, &closerWithContext{r.close, ctx}}
+}
+
+func (r *segmentReader) RawSegmentReader() RawSegmentReader { return r.read.r }
+func (r *segmentReader) RawCloser() io.Closer { return r.close.c }
+
+type segmentReaderWithContext struct {
+ read *readSegmentContextLockImpl
+ ctx context.Context
+}
+
+func (r *segmentReaderWithContext) Read() (content []byte, err error) {
+ r.read.setContext(r.ctx)
+ content, err = r.read.Read()
+ r.read.clearContext()
+ return
+}
+
+type readSegmentContextLockImpl struct {
+ contextLockImpl
+ r RawSegmentReader
+ metaGetter MetadataContainer
+ underlyingLock contextLock
+}
+
+func (r *readSegmentContextLockImpl) Read() (content []byte, err error) {
+ ft := tracing.FromContext(r.ctx, r.r)
+ err = ft.TraceFunc(r.ctx, "ReadSegment", func(ctx context.Context, span trace.Span) error {
+ var tmperr error
+ if r.underlyingLock != nil {
+ r.underlyingLock.setContext(ctx)
+ }
+ content, tmperr = r.r.Read()
+ if r.underlyingLock != nil {
+ r.underlyingLock.clearContext()
+ }
+ span.SetAttributes(SpanAttrByteContent(content)...)
+ return tmperr
+ }, trace.WithAttributes(SpanAttrContentMetadata(r.metaGetter.ContentMetadata()))).RegisterCustom(SpanRegisterReadError)
+ return
+}
+
+type closableRawSegmentReader struct {
+ RawSegmentReader
+ io.Closer
+}
diff --git a/pkg/content/tracing.go b/pkg/content/tracing.go
new file mode 100644
index 00000000..f11eec83
--- /dev/null
+++ b/pkg/content/tracing.go
@@ -0,0 +1,33 @@
+package content
+
+import "go.opentelemetry.io/otel/attribute"
+
+const (
+ SpanAttributeKeyByteContent = "byteContent"
+ SpanAttributeKeyByteContentLen = "byteContentLength"
+ SpanAttributeKeyByteContentCap = "byteContentCapacity"
+ SpanAttributeKeyContentMetadata = "contentMetadata"
+)
+
+// SpanAttrByteContent registers the byteContent and byteContentLength span attributes.
+// b should be the byte content that has been e.g. read or written in an I/O operation.
+func SpanAttrByteContent(b []byte) []attribute.KeyValue {
+ return []attribute.KeyValue{
+ attribute.String(SpanAttributeKeyByteContent, string(b)),
+ attribute.Int64(SpanAttributeKeyByteContentLen, int64(len(b))),
+ }
+}
+
+// SpanAttrByteContentCap extends SpanAttrByteContent with a capacity argument.
+// cap should be the capacity of e.g. that read or write, i.e. how much
+// could at most have been read or written.
+func SpanAttrByteContentCap(b []byte, cap int) []attribute.KeyValue {
+ return append(SpanAttrByteContent(b),
+ attribute.Int(SpanAttributeKeyByteContentCap, cap),
+ )
+}
+
+// TODO: Should this be used upstream too?
+func SpanAttrContentMetadata(m Metadata) attribute.KeyValue {
+ return attribute.Any(SpanAttributeKeyContentMetadata, m)
+}
diff --git a/pkg/content/writer.go b/pkg/content/writer.go
new file mode 100644
index 00000000..167346ae
--- /dev/null
+++ b/pkg/content/writer.go
@@ -0,0 +1,121 @@
+package content
+
+import (
+ "context"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/content/metadata"
+ "github.com/weaveworks/libgitops/pkg/tracing"
+ "github.com/weaveworks/libgitops/pkg/util/compositeio"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func NewWriter(w io.Writer, opts ...metadata.HeaderOption) Writer {
+ // If it already is a Writer, just return it
+ ww, ok := w.(Writer)
+ if ok {
+ return ww
+ }
+
+ // Use the closer if available
+ c, _ := w.(io.Closer)
+ // Never close stdio
+ if isStdio(w) {
+ c = nil
+ }
+ mb := NewMetadata(opts...).ToContainer()
+
+ return &writer{
+ MetadataContainer: mb,
+ write: &writeContextLockImpl{
+ w: w,
+ metaGetter: mb,
+ // underlyingLock is nil
+ },
+ close: &closeContextLockImpl{
+ c: c,
+ metaGetter: mb,
+ // underlyingLock is nil
+ },
+ }
+}
+
+type writer struct {
+ MetadataContainer
+ write *writeContextLockImpl
+ close *closeContextLockImpl
+}
+
+func (w *writer) WithContext(ctx context.Context) io.WriteCloser {
+ return compositeio.WriteCloser(&writerWithContext{w.write, ctx}, &closerWithContext{w.close, ctx})
+}
+func (w *writer) RawWriter() io.Writer { return w.write.w }
+func (w *writer) RawCloser() io.Closer { return w.close.c }
+
+func (w *writer) Wrap(wrapFn WrapWriterFunc) Writer {
+ newWriter := wrapFn(compositeio.WriteCloser(w.write, w.close))
+ if newWriter == nil {
+ panic("newWriter must not be nil")
+ }
+	// If an io.Closer is not returned, close this
+	// Writer's stream instead. Importantly, a trace
+	// will be registered for both this Writer and
+	// the returned one.
+ newCloser, ok := newWriter.(io.Closer)
+ if !ok {
+ newCloser = w.close
+ }
+
+ mb := w.ContentMetadata().Clone().ToContainer()
+
+ return &writer{
+ MetadataContainer: mb,
+ write: &writeContextLockImpl{
+ w: newWriter,
+ metaGetter: mb,
+ underlyingLock: w.write,
+ },
+ close: &closeContextLockImpl{
+ c: newCloser,
+ metaGetter: mb,
+ underlyingLock: w.close,
+ },
+ }
+}
+
+type writerWithContext struct {
+ write *writeContextLockImpl
+ ctx context.Context
+}
+
+func (w *writerWithContext) Write(p []byte) (n int, err error) {
+ w.write.setContext(w.ctx)
+ n, err = w.write.Write(p)
+ w.write.clearContext()
+ return
+}
+
+type writeContextLockImpl struct {
+ contextLockImpl
+ w io.Writer
+ metaGetter MetadataContainer
+ underlyingLock contextLock
+}
+
+func (r *writeContextLockImpl) Write(p []byte) (n int, err error) {
+ ft := tracing.FromContext(r.ctx, r.w)
+ err = ft.TraceFunc(r.ctx, "Write", func(ctx context.Context, span trace.Span) error {
+ var tmperr error
+ if r.underlyingLock != nil {
+ r.underlyingLock.setContext(ctx)
+ }
+ n, tmperr = r.w.Write(p)
+ if r.underlyingLock != nil {
+ r.underlyingLock.clearContext()
+ }
+ // Register metadata in the span
+ span.SetAttributes(SpanAttrByteContentCap(p[:n], len(p))...)
+ return tmperr
+ }, trace.WithAttributes(SpanAttrContentMetadata(r.metaGetter.ContentMetadata()))).Register()
+ return
+}
diff --git a/pkg/filter/interfaces.go b/pkg/filter/interfaces.go
index 62d3cd3f..a097112b 100644
--- a/pkg/filter/interfaces.go
+++ b/pkg/filter/interfaces.go
@@ -1,48 +1,20 @@
package filter
-import "github.com/weaveworks/libgitops/pkg/runtime"
+import (
+ "errors"
-// ListFilter is an interface for pipe-like list filtering behavior.
-type ListFilter interface {
- // Filter walks through all objects in obj, assesses whether the object
- // matches the filter parameters, and conditionally adds it to the return
- // slice or not. This method can be thought of like an UNIX pipe.
- Filter(objs ...runtime.Object) ([]runtime.Object, error)
-}
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var (
+ // ErrInvalidFilterParams describes an error where invalid parameters were given
+ // to a filter.
+ ErrInvalidFilterParams = errors.New("invalid parameters given to filter")
+)
// ObjectFilter is an interface for filtering objects one-by-one.
type ObjectFilter interface {
- // Filter takes in one object (at once, per invocation), and returns a
+	// Match takes in one object per invocation, and returns a
// boolean whether the object matches the filter parameters, or not.
- Filter(obj runtime.Object) (bool, error)
-}
-
-// ObjectToListFilter transforms an ObjectFilter into a ListFilter. If of is nil,
-// this function panics.
-func ObjectToListFilter(of ObjectFilter) ListFilter {
- if of == nil {
- panic("programmer error: of ObjectFilter must not be nil in ObjectToListFilter")
- }
- return &objectToListFilter{of}
-}
-
-type objectToListFilter struct {
- of ObjectFilter
-}
-
-// Filter implements ListFilter, but uses an ObjectFilter for the underlying logic.
-func (f objectToListFilter) Filter(objs ...runtime.Object) (retarr []runtime.Object, err error) {
- // Walk through all objects
- for _, obj := range objs {
- // Match them one-by-one against the ObjectFilter
- match, err := f.of.Filter(obj)
- if err != nil {
- return nil, err
- }
- // If the object matches, include it in the return array
- if match {
- retarr = append(retarr, obj)
- }
- }
- return
+ Match(obj client.Object) (bool, error)
}
diff --git a/pkg/filter/labels.go b/pkg/filter/labels.go
new file mode 100644
index 00000000..24ef9f10
--- /dev/null
+++ b/pkg/filter/labels.go
@@ -0,0 +1,46 @@
+package filter
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/labels"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// LabelsFilter implements ObjectFilter and FilterOption.
+// It also implements client.{List,DeleteAllOf}Option so
+// it can be passed into client.Client.{List,DeleteAllOf}
+// as a way to conveniently filter those lists.
+var _ ObjectFilter = LabelsFilter{}
+var _ FilterOption = LabelsFilter{}
+var _ client.ListOption = LabelsFilter{}
+var _ client.DeleteAllOfOption = LabelsFilter{}
+
+// LabelsFilter is an ObjectFilter that compares metav1.Object.GetLabels()
+// to the LabelSelector field.
+type LabelsFilter struct {
+	// LabelSelector filters results by label. Parse one from its raw
+	// string form using k8s.io/apimachinery/pkg/labels.Parse.
+ // +required
+ LabelSelector labels.Selector
+}
+
+// Match implements ObjectFilter
+func (f LabelsFilter) Match(obj client.Object) (bool, error) {
+	// Require f.LabelSelector to always be set.
+ if f.LabelSelector == nil {
+ return false, fmt.Errorf("the LabelsFilter.LabelSelector field must not be nil: %w", ErrInvalidFilterParams)
+ }
+
+ return f.LabelSelector.Matches(labels.Set(obj.GetLabels())), nil
+}
+
+// ApplyToList implements client.ListOption as a no-op, present only so that
+// this struct can be passed to client.Reader.List().
+func (f LabelsFilter) ApplyToList(_ *client.ListOptions) {}
+func (f LabelsFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {}
+
+// ApplyToFilterOptions implements FilterOption
+func (f LabelsFilter) ApplyToFilterOptions(target *FilterOptions) {
+ target.ObjectFilters = append(target.ObjectFilters, f)
+}
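+
+// A minimal usage sketch (assuming a client.Object obj; labels.Parse is from
+// k8s.io/apimachinery/pkg/labels):
+//
+//	sel, err := labels.Parse("app=sample")
+//	if err != nil {
+//		return err
+//	}
+//	matched, err := LabelsFilter{LabelSelector: sel}.Match(obj)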
diff --git a/pkg/filter/name.go b/pkg/filter/name.go
index 42e516cd..ade3d995 100644
--- a/pkg/filter/name.go
+++ b/pkg/filter/name.go
@@ -4,40 +4,36 @@ import (
"fmt"
"strings"
- "github.com/weaveworks/libgitops/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
)
-// NameFilter implements ObjectFilter and ListOption.
+// NameFilter implements ObjectFilter and FilterOption.
+// It also implements client.{List,DeleteAllOf}Option so
+// it can be passed into client.Client.{List,DeleteAllOf}
+// as a way to conveniently filter those lists.
var _ ObjectFilter = NameFilter{}
-var _ ListOption = NameFilter{}
+var _ FilterOption = NameFilter{}
+var _ client.ListOption = NameFilter{}
+var _ client.DeleteAllOfOption = NameFilter{}
-// NameFilter is an ObjectFilter that compares runtime.Object.GetName()
+// NameFilter is an ObjectFilter that compares Object.GetName()
// to the Name field by either equality or prefix.
type NameFilter struct {
// Name matches the object by .metadata.name.
// +required
Name string
- // Namespace matches the object by .metadata.namespace. If left as
- // an empty string, it is ignored when filtering.
- // +optional
- Namespace string
- // MatchPrefix whether the name (not namespace) matching should be exact, or prefix-based.
+ // MatchPrefix whether the name matching should be exact, or prefix-based.
// +optional
MatchPrefix bool
}
-// Filter implements ObjectFilter
-func (f NameFilter) Filter(obj runtime.Object) (bool, error) {
+// Match implements ObjectFilter
+func (f NameFilter) Match(obj client.Object) (bool, error) {
// Require f.Name to always be set.
if len(f.Name) == 0 {
return false, fmt.Errorf("the NameFilter.Name field must not be empty: %w", ErrInvalidFilterParams)
}
- // If f.Namespace is set, and it does not match the object, return false
- if len(f.Namespace) > 0 && f.Namespace != obj.GetNamespace() {
- return false, nil
- }
-
// If the Name should be matched by the prefix, use strings.HasPrefix
if f.MatchPrefix {
return strings.HasPrefix(obj.GetName(), f.Name), nil
@@ -46,9 +42,12 @@ func (f NameFilter) Filter(obj runtime.Object) (bool, error) {
return f.Name == obj.GetName(), nil
}
-// ApplyToListOptions implements ListOption, and adds itself converted to
-// a ListFilter to ListOptions.Filters.
-func (f NameFilter) ApplyToListOptions(target *ListOptions) error {
- target.Filters = append(target.Filters, ObjectToListFilter(f))
- return nil
+// ApplyToList implements client.ListOption as a no-op, present only so that
+// this struct can be passed to client.Reader.List().
+func (f NameFilter) ApplyToList(_ *client.ListOptions) {}
+func (f NameFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {}
+
+// ApplyToFilterOptions implements FilterOption
+func (f NameFilter) ApplyToFilterOptions(target *FilterOptions) {
+ target.ObjectFilters = append(target.ObjectFilters, f)
}
diff --git a/pkg/filter/namespace.go b/pkg/filter/namespace.go
new file mode 100644
index 00000000..ae1c8842
--- /dev/null
+++ b/pkg/filter/namespace.go
@@ -0,0 +1,45 @@
+package filter
+
+import (
+ "fmt"
+
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// NamespaceFilter implements ObjectFilter and FilterOption.
+// It also implements client.{List,DeleteAllOf}Option so
+// it can be passed into client.Client.{List,DeleteAllOf}
+// as a way to conveniently filter those lists.
+var _ ObjectFilter = NamespaceFilter{}
+var _ FilterOption = NamespaceFilter{}
+var _ client.ListOption = NamespaceFilter{}
+var _ client.DeleteAllOfOption = NamespaceFilter{}
+
+// NamespaceFilter is an ObjectFilter that compares Object.GetNamespace()
+// to the Namespace field.
+type NamespaceFilter struct {
+ // Namespace matches the object by .metadata.namespace. If left as
+ // an empty string, it is ignored when filtering.
+ // +required
+ Namespace string
+}
+
+// Match implements ObjectFilter
+func (f NamespaceFilter) Match(obj client.Object) (bool, error) {
+ // Require f.Namespace to always be set.
+ if len(f.Namespace) == 0 {
+ return false, fmt.Errorf("the NamespaceFilter.Namespace field must not be empty: %w", ErrInvalidFilterParams)
+ }
+ // Otherwise, just use an equality check
+ return f.Namespace == obj.GetNamespace(), nil
+}
+
+// ApplyToList implements client.ListOption as a no-op, present only so that
+// this struct can be passed to client.Reader.List().
+func (f NamespaceFilter) ApplyToList(_ *client.ListOptions) {}
+func (f NamespaceFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {}
+
+// ApplyToFilterOptions implements FilterOption
+func (f NamespaceFilter) ApplyToFilterOptions(target *FilterOptions) {
+ target.ObjectFilters = append(target.ObjectFilters, f)
+}
diff --git a/pkg/filter/options.go b/pkg/filter/options.go
index 4a831dda..6608da30 100644
--- a/pkg/filter/options.go
+++ b/pkg/filter/options.go
@@ -1,27 +1,56 @@
package filter
-// ListOptions is a generic struct for listing options.
-type ListOptions struct {
- // Filters contains a chain of ListFilters, which will be processed in order and pipe the
- // available objects through before returning.
- Filters []ListFilter
+import "sigs.k8s.io/controller-runtime/pkg/client"
+
+// FilterOption is an interface for implementations that know how to
+// mutate FilterOptions.
+type FilterOption interface {
+ // ApplyToFilterOptions applies the configuration of the current object into a target FilterOptions struct.
+ ApplyToFilterOptions(target *FilterOptions)
}
-// ListOption is an interface which can be passed into e.g. List() methods as a variadic-length
-// argument list.
-type ListOption interface {
- // ApplyToListOptions applies the configuration of the current object into a target ListOptions struct.
- ApplyToListOptions(target *ListOptions) error
+// FilterOptions is a set of options for filtering. It implements the ObjectFilter interface
+// itself, so it can be used as a composite ObjectFilter.
+type FilterOptions struct {
+ // ObjectFilters contains a set of filters for a single object. All of the filters must return
+	// true and a nil error for Match(obj) to return (true, nil).
+ ObjectFilters []ObjectFilter
}
-// MakeListOptions makes a completed ListOptions struct from a list of ListOption implementations.
-func MakeListOptions(opts ...ListOption) (*ListOptions, error) {
- o := &ListOptions{}
- for _, opt := range opts {
- // For every option, apply it into o, and check if there's an error
- if err := opt.ApplyToListOptions(o); err != nil {
- return nil, err
+// Match matches the object against all the ObjectFilters.
+func (o *FilterOptions) Match(obj client.Object) (bool, error) {
+ for _, filter := range o.ObjectFilters {
+ matched, err := filter.Match(obj)
+ if err != nil {
+ return false, err
+ }
+ if !matched {
+ return false, nil
}
}
- return o, nil
+ return true, nil
+}
+
+// ApplyToFilterOptions implements FilterOption
+func (o *FilterOptions) ApplyToFilterOptions(target *FilterOptions) {
+ target.ObjectFilters = append(target.ObjectFilters, o.ObjectFilters...)
+}
+
+// ApplyOptions applies the given FilterOptions to itself and returns itself.
+func (o *FilterOptions) ApplyOptions(opts []FilterOption) *FilterOptions {
+ for _, opt := range opts {
+ opt.ApplyToFilterOptions(o)
+ }
+ return o
+}
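+
+// A minimal sketch combining filters (assuming a client.Object obj):
+//
+//	o := (&FilterOptions{}).ApplyOptions([]FilterOption{
+//		NameFilter{Name: "my-app", MatchPrefix: true},
+//		NamespaceFilter{Namespace: "default"},
+//	})
+//	matched, err := o.Match(obj)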
+
+// ApplyOption applies one option that might, but is not guaranteed at
+// compile time to, implement FilterOption; options that do not are silently
+// ignored. This can be used for lists of other Options that possibly
+// implement FilterOption in the following way:
+// for _, opt := range opts { filterOpts.ApplyOption(opt) }
+func (o *FilterOptions) ApplyOption(opt interface{}) *FilterOptions {
+ if fOpt, ok := opt.(FilterOption); ok {
+ fOpt.ApplyToFilterOptions(o)
+ }
+ return o
}
diff --git a/pkg/filter/uid.go b/pkg/filter/uid.go
index eea48ffd..1aedab3f 100644
--- a/pkg/filter/uid.go
+++ b/pkg/filter/uid.go
@@ -1,25 +1,23 @@
package filter
import (
- "errors"
"fmt"
"strings"
- "github.com/weaveworks/libgitops/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
)
-var (
- // ErrInvalidFilterParams describes an error where invalid parameters were given
- // to a filter.
- ErrInvalidFilterParams = errors.New("invalid parameters given to filter")
-)
-
-// UIDFilter implements ObjectFilter and ListOption.
+// UIDFilter implements ObjectFilter and FilterOption.
+// It also implements client.{List,DeleteAllOf}Option so
+// it can be passed into client.Client.{List,DeleteAllOf}
+// as a way to conveniently filter those lists.
var _ ObjectFilter = UIDFilter{}
-var _ ListOption = UIDFilter{}
+var _ FilterOption = UIDFilter{}
+var _ client.ListOption = UIDFilter{}
+var _ client.DeleteAllOfOption = UIDFilter{}
-// UIDFilter is an ObjectFilter that compares runtime.Object.GetUID() to
+// UIDFilter is an ObjectFilter that compares Object.GetUID() to
// the UID field by either equality or prefix. The UID field is required,
// otherwise ErrInvalidFilterParams is returned.
type UIDFilter struct {
@@ -31,8 +29,8 @@ type UIDFilter struct {
MatchPrefix bool
}
-// Filter implements ObjectFilter
-func (f UIDFilter) Filter(obj runtime.Object) (bool, error) {
+// Match implements ObjectFilter
+func (f UIDFilter) Match(obj client.Object) (bool, error) {
// Require f.UID to always be set.
if len(f.UID) == 0 {
return false, fmt.Errorf("the UIDFilter.UID field must not be empty: %w", ErrInvalidFilterParams)
@@ -45,9 +43,12 @@ func (f UIDFilter) Filter(obj runtime.Object) (bool, error) {
return f.UID == obj.GetUID(), nil
}
-// ApplyToListOptions implements ListOption, and adds itself converted to
-// a ListFilter to ListOptions.Filters.
-func (f UIDFilter) ApplyToListOptions(target *ListOptions) error {
- target.Filters = append(target.Filters, ObjectToListFilter(f))
- return nil
+// ApplyToList implements client.ListOption as a no-op, present only so that
+// this struct can be passed to client.Reader.List().
+func (f UIDFilter) ApplyToList(_ *client.ListOptions) {}
+func (f UIDFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {}
+
+// ApplyToFilterOptions implements FilterOption
+func (f UIDFilter) ApplyToFilterOptions(target *FilterOptions) {
+ target.ObjectFilters = append(target.ObjectFilters, f)
}
diff --git a/pkg/frame/constructors.go b/pkg/frame/constructors.go
new file mode 100644
index 00000000..6e8ebe49
--- /dev/null
+++ b/pkg/frame/constructors.go
@@ -0,0 +1,104 @@
+package frame
+
+import (
+ "bytes"
+ "context"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+)
+
+// 2 generic Reader constructors
+
+func NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader {
+ return internalFactoryVar.NewSingleReader(ct, r, opts...)
+}
+
+func NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader {
+ return internalFactoryVar.NewRecognizingReader(ctx, r, opts...)
+}
+
+// 4 JSON-YAML Reader constructors using the default factory
+
+func NewYAMLReader(r content.Reader, opts ...ReaderOption) Reader {
+ return internalFactoryVar.NewReader(content.ContentTypeYAML, r, opts...)
+}
+
+func NewJSONReader(r content.Reader, opts ...ReaderOption) Reader {
+ return internalFactoryVar.NewReader(content.ContentTypeJSON, r, opts...)
+}
+
+func NewSingleYAMLReader(r content.Reader, opts ...SingleReaderOption) Reader {
+ return NewSingleReader(content.ContentTypeYAML, r, opts...)
+}
+
+func NewSingleJSONReader(r content.Reader, opts ...SingleReaderOption) Reader {
+ return NewSingleReader(content.ContentTypeJSON, r, opts...)
+}
+
+// 2 generic Writer constructors
+
+func NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer {
+ return internalFactoryVar.NewSingleWriter(ct, w, opts...)
+}
+
+func NewRecognizingWriter(r content.Writer, opts ...RecognizingWriterOption) Writer {
+ return internalFactoryVar.NewRecognizingWriter(r, opts...)
+}
+
+// 4 JSON-YAML Writer constructors using the default factory
+
+func NewYAMLWriter(r content.Writer, opts ...WriterOption) Writer {
+ return internalFactoryVar.NewWriter(content.ContentTypeYAML, r, opts...)
+}
+
+func NewJSONWriter(r content.Writer, opts ...WriterOption) Writer {
+ return internalFactoryVar.NewWriter(content.ContentTypeJSON, r, opts...)
+}
+
+func NewSingleYAMLWriter(r content.Writer, opts ...SingleWriterOption) Writer {
+ return internalFactoryVar.NewSingleWriter(content.ContentTypeYAML, r, opts...)
+}
+
+func NewSingleJSONWriter(r content.Writer, opts ...SingleWriterOption) Writer {
+ return internalFactoryVar.NewSingleWriter(content.ContentTypeJSON, r, opts...)
+}
+
+// 1 single, 3 YAML and 1 recognizing content.Reader helper constructors
+
+/*func FromSingleBuffer(ct content.ContentType, buf *bytes.Buffer, opts ...SingleReaderOption) Reader {
+ return NewSingleReader(ct, content.FromBuffer(buf), opts...)
+}*/
+
+func FromYAMLBytes(yamlBytes []byte, opts ...ReaderOption) Reader {
+ return NewYAMLReader(content.FromBytes(yamlBytes), opts...)
+}
+
+func FromYAMLString(yamlStr string, opts ...ReaderOption) Reader {
+ return NewYAMLReader(content.FromString(yamlStr), opts...)
+}
+
+func FromYAMLFile(filePath string, opts ...ReaderOption) Reader {
+ return NewYAMLReader(content.FromFile(filePath), opts...)
+}
+
+func FromFile(ctx context.Context, filePath string, opts ...RecognizingReaderOption) Reader {
+ return NewRecognizingReader(ctx, content.FromFile(filePath), opts...)
+}
+
+// 1 single, 2 YAML and 1 recognizing content.Writer helper constructors
+
+func ToSingleBuffer(ct content.ContentType, buf *bytes.Buffer, opts ...SingleWriterOption) Writer {
+ return NewSingleWriter(ct, content.ToBuffer(buf), opts...)
+}
+
+func ToYAMLBuffer(buf *bytes.Buffer, opts ...WriterOption) Writer {
+ return NewYAMLWriter(content.NewWriter(buf), opts...)
+}
+
+func ToYAMLFile(filePath string, opts ...WriterOption) Writer {
+ return NewYAMLWriter(content.ToFile(filePath), opts...)
+}
+
+func ToFile(filePath string, opts ...RecognizingWriterOption) Writer {
+ return NewRecognizingWriter(content.ToFile(filePath), opts...)
+}
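+
+// A minimal end-to-end sketch (paths are hypothetical; ctx is assumed):
+//
+//	r := FromYAMLFile("in.yaml")
+//	w := ToYAMLFile("out.yaml")
+//	frame, err := r.ReadFrame(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	if err := w.WriteFrame(ctx, frame); err != nil {
+//		return err
+//	}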
diff --git a/pkg/frame/errors.go b/pkg/frame/errors.go
new file mode 100644
index 00000000..e4539ce1
--- /dev/null
+++ b/pkg/frame/errors.go
@@ -0,0 +1,38 @@
+package frame
+
+import (
+ "fmt"
+
+ "github.com/weaveworks/libgitops/pkg/util/limitedio"
+ "github.com/weaveworks/libgitops/pkg/util/structerr"
+)
+
+// Enforce all struct errors implementing structerr.StructError
+var _ structerr.StructError = &FrameCountOverflowError{}
+
+// FrameCountOverflowError is returned when a Reader or Writer would process more
+// frames than allowed.
+type FrameCountOverflowError struct {
+ // +optional
+ MaxFrameCount limitedio.Limit
+}
+
+func (e *FrameCountOverflowError) Error() string {
+ msg := "no more frames can be processed, hit maximum amount"
+ if e.MaxFrameCount < 0 {
+ msg = fmt.Sprintf("%s: infinity", msg) // this is most likely a programming error
+ } else if e.MaxFrameCount > 0 {
+ msg = fmt.Sprintf("%s: %d", msg, e.MaxFrameCount)
+ }
+ return msg
+}
+
+func (e *FrameCountOverflowError) Is(target error) bool {
+ _, ok := target.(*FrameCountOverflowError)
+ return ok
+}
+
+// ErrFrameCountOverflow creates a *FrameCountOverflowError
+func ErrFrameCountOverflow(maxFrames limitedio.Limit) *FrameCountOverflowError {
+ return &FrameCountOverflowError{MaxFrameCount: maxFrames}
+}
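+
+// Callers are expected to match this error with errors.Is rather than ==,
+// for example:
+//
+//	if errors.Is(err, &FrameCountOverflowError{}) {
+//		// too many frames were processed
+//	}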
diff --git a/pkg/frame/interfaces.go b/pkg/frame/interfaces.go
new file mode 100644
index 00000000..e6224849
--- /dev/null
+++ b/pkg/frame/interfaces.go
@@ -0,0 +1,160 @@
+package frame
+
+import (
+ "context"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+)
+
+// TODO: Maybe implement/use context-aware (cancellable) io.Readers and io.Writers underneath?
+
+// Closer is like io.Closer, but with a Context passed along as well.
+type Closer interface {
+ // Close closes the underlying resource. If Close is called multiple times, the
+ // underlying io.Closer decides the behavior and return value. If Close is called
+ // during a Read/Write operation, the underlying io.ReadCloser/io.WriteCloser
+ // decides the behavior.
+ Close(ctx context.Context) error
+}
+
+// Reader is a framing type specific reader of an underlying io.Reader or io.ReadCloser.
+// If an io.Reader is used, Close(ctx) is a no-op. If an io.ReadCloser is used, Close(ctx)
+// will close the underlying io.ReadCloser.
+//
+// The Reader returns frames, as defined by the relevant framing type.
+// For example, for YAML a frame represents a YAML document, while JSON is a self-framing
+// format, i.e. encoded objects can be written back-to-back to a stream as
+// '{ "a": "" ... }{ "b": "" ... }' and split apart again when reading.
+//
+// Another way of defining a "frame" is that it MUST contain exactly one decodable object.
+// This means that no empty (i.e. len(frame) == 0) frames shall be returned. Note: The decodable
+// object might represent a list object (e.g. as Kubernetes' v1.List); more generally something
+// decodable into a Go struct.
+//
+// The Reader can use as many underlying Read(p []byte) (n int, err error) calls as it needs
+// to the underlying io.Read(Clos)er. As long as frames can successfully be read from the underlying
+// io.Read(Clos)er, len(frame) != 0 and err == nil. When io.EOF is encountered, len(frame) == 0 and
+// errors.Is(err, io.EOF) == true.
+//
+// The Reader MUST be thread-safe, i.e. it must use the underlying io.Reader responsibly
+// without causing race conditions when reading, e.g. by guarding reads with a mutual
+// exclusion lock (mutex). The mutex isn't locked for closes, however. This enables e.g. closing the
+// reader during a read operation, and other custom closing behaviors.
+//
+// The Reader MUST directly abort the read operation if the frame size exceeds
+// ReadWriterOptions.MaxFrameSize, and return ErrFrameSizeOverflow.
+//
+// The Reader MUST return ErrFrameCountOverflow if the underlying Reader has returned more than
+// ReadWriterOptions.MaxFrameCount successful read operations. The "total" frame limit is
+// 10 * ReadWriterOptions.MaxFrameCount, which includes failed, empty and successful frames.
+// Returned errors (including io.EOF) MUST be checked for equality using
+// errors.Is(err, target), NOT using err == target.
+//
+// TODO: Say that the ContentType is assumed constant per content.Reader
+//
+// The Reader MAY respect cancellation signals on the context, depending on ReaderOptions.
+// The Reader MAY support reporting trace spans for how long certain operations take.
+type Reader interface {
+ // The Reader is specific to possibly multiple framing types
+ content.ContentTyped
+
+ // ReadFrame reads one frame from the underlying io.Read(Clos)er. At maximum, the frame is as
+ // large as ReadWriterOptions.MaxFrameSize. See the documentation on the Reader interface for more
+ // details.
+ ReadFrame(ctx context.Context) ([]byte, error)
+
+ // Exposes Metadata about the underlying io.Reader
+ content.MetadataContainer
+
+ // The Reader can be closed. If an underlying io.Reader is used, this is a no-op. If an
+ // io.ReadCloser is used, this will close that io.ReadCloser.
+ Closer
+}
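+
+// A minimal read-loop sketch over a Reader r (ctx is assumed):
+//
+//	defer func() { _ = r.Close(ctx) }()
+//	for {
+//		frame, err := r.ReadFrame(ctx)
+//		if errors.Is(err, io.EOF) {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		// process frame ...
+//	}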
+
+type ReaderFactory interface {
+ // ct is dominant; will error if r has a conflicting content type
+ // ct must be one of the supported content types
+ NewReader(ct content.ContentType, r content.Reader, opts ...ReaderOption) Reader
+	// opts.MaxFrameCount is dominant; it will always be set to 1.
+	// ct may be any content type; it need not be one of the supported ones.
+	// ct is dominant; will error if r has a conflicting content type.
+	// Single options should not have MaxFrameCount at all, if possible.
+ NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader
+ // will use the content type from r if set, otherwise infer from content metadata
+ // or peek bytes using the content.ContentTypeRecognizer
+ // should add to options for a recognizer
+ NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader
+
+ //SupportedContentTypes()
+}
+
+// Writer is a framing type specific writer to an underlying io.Writer or io.WriteCloser.
+// If an io.Writer is used, Close(ctx) is a no-op. If an io.WriteCloser is used, Close(ctx)
+// will close the underlying io.WriteCloser.
+//
+// The Writer writes frames to the underlying stream, as defined by the framing type.
+// For example, for YAML a frame represents a YAML document, while JSON is a self-framing
+// format, i.e. encoded objects can be written back-to-back to a stream as
+// '{ "a": "" ... }{ "b": "" ... }'.
+//
+// Another way of defining a "frame" is that it MUST contain exactly one decodable object.
+// It is valid (but not recommended) to supply empty frames to the Writer.
+//
+// Writer will only call the underlying io.Write(Close)r's Write(p []byte) call once.
+// If n < len(frame) and err == nil, io.ErrShortWrite will be returned. This means that
+// it's the underlying io.Writer's responsibility to buffer the frame data, if needed.
+//
+// The Writer MUST be thread-safe, i.e. it must use the underlying io.Writer responsibly
+// without causing race conditions when writing, e.g. by guarding writes with a
+// mutual exclusion lock (mutex). The mutex isn't locked for closes, however.
+// This enables e.g. closing the writer during a write operation, and other custom closing behaviors.
+//
+// The Writer MUST directly abort the write operation if the frame size exceeds ReadWriterOptions.MaxFrameSize,
+// and return ErrFrameSizeOverflow. The Writer MUST ignore empty frames, where len(frame) == 0, possibly
+// after sanitation. The Writer MUST return ErrFrameCountOverflow if WriteFrame has been called more than
+// ReadWriterOptions.MaxFrameCount times.
+//
+// Returned errors MUST be checked for equality using errors.Is(err, target), NOT using err == target.
+//
+// The Writer MAY respect cancellation signals on the context, depending on WriterOptions.
+// The Writer MAY support reporting trace spans for how long certain operations take.
+//
+// TODO: Say that the ContentType is assumed constant per content.Writer
+type Writer interface {
+ // The Writer is specific to this framing type.
+ content.ContentTyped
+ // WriteFrame writes one frame to the underlying io.Write(Close)r.
+ // See the documentation on the Writer interface for more details.
+ WriteFrame(ctx context.Context, frame []byte) error
+
+ // Exposes metadata from the underlying content.Writer
+ content.MetadataContainer
+
+ // The Writer can be closed. If an underlying io.Writer is used, this is a no-op. If an
+ // io.WriteCloser is used, this will close that io.WriteCloser.
+ Closer
+}
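+
+// A minimal write sketch over a Writer w (ctx and frames are assumed):
+//
+//	defer func() { _ = w.Close(ctx) }()
+//	for _, frame := range frames {
+//		if err := w.WriteFrame(ctx, frame); err != nil {
+//			return err
+//		}
+//	}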
+
+type WriterFactory interface {
+	// ct is dominant; will error if w has a conflicting content type
+ // ct must be one of the supported content types
+ NewWriter(ct content.ContentType, w content.Writer, opts ...WriterOption) Writer
+	// opts.MaxFrameCount is dominant; it will always be set to 1.
+	// ct may be any content type; it need not be one of the supported ones.
+	// ct is dominant; will error if w has a conflicting content type.
+	// Single options should not have MaxFrameCount at all, if possible.
+ NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer
+	// will use the content type from w if set, otherwise infer from content metadata
+ // using the content.ContentTypeRecognizer
+ // should add to options for a recognizer
+ NewRecognizingWriter(w content.Writer, opts ...RecognizingWriterOption) Writer
+
+ // The SupportedContentTypes() method specifies what content types are supported by the
+ // NewWriter
+ content.ContentTypeSupporter
+}
+
+type Factory interface {
+ ReaderFactory
+ WriterFactory
+}
diff --git a/pkg/frame/k8s_reader_streaming.go b/pkg/frame/k8s_reader_streaming.go
new file mode 100644
index 00000000..9ff21cec
--- /dev/null
+++ b/pkg/frame/k8s_reader_streaming.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file provides a means to read one whole frame from an io.ReadCloser
+// returned by a k8s.io/apimachinery/pkg/runtime.Framer.NewFrameReader()
+//
+// This code is (temporarily) forked and derived from
+// https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go
+// and will be upstreamed if maintainers allow. The reason for forking this
+// small piece of code is two-fold: a) This functionality is bundled within
+// a runtime.Decoder, not provided as "just" some type of Reader, b) The
+// upstream doesn't allow to configure the maximum frame size.
+
+package frame
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/util/limitedio"
+ "k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+)
+
+// Ref: https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go#L63-L67
+func newK8sStreamingReader(rc io.ReadCloser, maxFrameSize int64) content.ClosableRawSegmentReader {
+ if maxFrameSize == 0 {
+ maxFrameSize = limitedio.DefaultMaxReadSize.Int64()
+ }
+
+ return &k8sStreamingReaderImpl{
+ reader: rc,
+ buf: make([]byte, 1024),
+ // CHANGE: maxBytes is configurable
+ maxBytes: maxFrameSize,
+ }
+}
+
+// Ref: https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go#L51-L57
+type k8sStreamingReaderImpl struct {
+ reader io.ReadCloser
+ buf []byte
+ // CHANGE: In the original code, maxBytes was an int. int64 is more specific and flexible, however.
+ // TODO: Re-review this code; shall we have int or int64 here?
+ maxBytes int64
+ resetRead bool
+}
+
+// Ref: https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go#L75-L106
+func (d *k8sStreamingReaderImpl) Read() ([]byte, error) {
+ base := 0
+ for {
+ n, err := d.reader.Read(d.buf[base:])
+ if err == io.ErrShortBuffer {
+ if n == 0 {
+ return nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf))
+ }
+ if d.resetRead {
+ continue
+ }
+ // double the buffer size up to maxBytes
+ // NOTE: This might need changing upstream eventually, it only works when
+ // d.maxBytes/len(d.buf) is a multiple of 2
+ // CHANGE: In the original code no cast from int -> int64 was needed
+ bufLen := int64(len(d.buf))
+ if bufLen < d.maxBytes {
+ base += n
+ // CHANGE: Instead of unconditionally doubling the buffer, double the buffer
+ // length only to the extent it fits within d.maxBytes. Previously, it was a
+ // requirement that d.maxBytes was a multiple of 1024 for this logic to work.
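+				// For example, with len(d.buf) == 1024 and d.maxBytes == 1536,
+				// only 512 more bytes are appended.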
+ newBytes := len(d.buf)
+ if d.maxBytes < 2*bufLen {
+ newBytes = int(d.maxBytes - bufLen)
+ }
+ d.buf = append(d.buf, make([]byte, newBytes)...)
+ continue
+ }
+ // must read the rest of the frame (until we stop getting ErrShortBuffer)
+ d.resetRead = true
+ // base = 0 // CHANGE: Not needed (as pointed out by golangci-lint:ineffassign)
+ return nil, streaming.ErrObjectTooLarge
+ }
+ if err != nil {
+ return nil, err
+ }
+ if d.resetRead {
+ // now that we have drained the large read, continue
+ d.resetRead = false
+ continue
+ }
+ base += n
+ break
+ }
+ return d.buf[:base], nil
+}
+
+func (d *k8sStreamingReaderImpl) Close() error { return d.reader.Close() }
diff --git a/pkg/frame/k8s_reader_yaml.go b/pkg/frame/k8s_reader_yaml.go
new file mode 100644
index 00000000..eac7c50c
--- /dev/null
+++ b/pkg/frame/k8s_reader_yaml.go
@@ -0,0 +1,130 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file provides a means to extract one YAML frame from an io.ReadCloser
+//
+// This code is (temporarily) forked and derived from
+// https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/util/yaml/decoder.go#L111
+// and will be upstreamed if maintainers allow. The reason for forking this
+// small piece of code is two-fold: a) The upstream doesn't allow configuring
+// the maximum frame size, but hard-codes it to 5MB and b) for the first
+// frame, the "---\n" prefix is returned and would otherwise be unnecessarily
+// counted as frame content, when it actually is a frame separator.
+
+package frame
+
+import (
+ "bufio"
+ "bytes"
+ "io"
+)
+
+// k8sYAMLReader reads chunks of objects and returns ErrShortBuffer if
+// the data is not sufficient.
+type k8sYAMLReader struct {
+ r io.ReadCloser
+ scanner *bufio.Scanner
+ remaining []byte
+}
+
+// newK8sYAMLReader decodes YAML documents from the provided
+// stream in chunks by converting each document (as defined by
+// the YAML spec) into its own chunk. io.ErrShortBuffer will be
+// returned if the entire buffer could not be read to assist
+// the caller in framing the chunk.
+func newK8sYAMLReader(r io.ReadCloser, maxFrameSize int) io.ReadCloser {
+ scanner := bufio.NewScanner(r)
+	// the size of the initial buffer allocation: 4k
+	buf := make([]byte, 4*1024)
+	// the maximum size used to buffer a token; configurable here, unlike upstream's hard-coded 5M
+ scanner.Buffer(buf, maxFrameSize)
+ scanner.Split(splitYAMLDocument)
+ return &k8sYAMLReader{
+ r: r,
+ scanner: scanner,
+ }
+}
+
+// Read reads the previous slice into the buffer, or attempts to read
+// the next chunk.
+// TODO: switch to readline approach.
+func (d *k8sYAMLReader) Read(data []byte) (n int, err error) {
+ left := len(d.remaining)
+ if left == 0 {
+ // return the next chunk from the stream
+ if !d.scanner.Scan() {
+ err := d.scanner.Err()
+ if err == nil {
+ err = io.EOF
+ }
+ return 0, err
+ }
+ out := d.scanner.Bytes()
+ // TODO: This could be removed by the sanitation step; we don't have to
+ // do it here at this point.
+ out = bytes.TrimPrefix(out, []byte("---\n"))
+ d.remaining = out
+ left = len(out)
+ }
+
+ // fits within data
+ if left <= len(data) {
+ copy(data, d.remaining)
+ d.remaining = nil
+ return left, nil
+ }
+
+ // caller will need to reread
+ copy(data, d.remaining[:len(data)])
+ d.remaining = d.remaining[len(data):]
+ return len(data), io.ErrShortBuffer
+}
+
+func (d *k8sYAMLReader) Close() error {
+ return d.r.Close()
+}
+
+const yamlSeparator = "\n---"
+
+// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents.
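+// For example, the stream "a: 1\n---\nb: 2" yields the tokens "a: 1" and "b: 2".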
+func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) {
+ if atEOF && len(data) == 0 {
+ return 0, nil, nil
+ }
+ sep := len([]byte(yamlSeparator))
+ if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 {
+ // We have a potential document terminator
+ i += sep
+ after := data[i:]
+ if len(after) == 0 {
+ // we can't read any more characters
+ if atEOF {
+ return len(data), data[:len(data)-sep], nil
+ }
+ return 0, nil, nil
+ }
+ if j := bytes.IndexByte(after, '\n'); j >= 0 {
+ return i + j + 1, data[0 : i-sep], nil
+ }
+ return 0, nil, nil
+ }
+ // If we're at EOF, we have a final, non-terminated line. Return it.
+ if atEOF {
+ return len(data), data, nil
+ }
+ // Request more data.
+ return 0, nil, nil
+}
diff --git a/pkg/frame/options.go b/pkg/frame/options.go
new file mode 100644
index 00000000..eb2b74ee
--- /dev/null
+++ b/pkg/frame/options.go
@@ -0,0 +1,237 @@
+package frame
+
+import (
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/frame/sanitize"
+ "github.com/weaveworks/libgitops/pkg/util/limitedio"
+)
+
+// TODO: Figure out a new Options pattern, in the form of:
+
+/*
+func SomeOperation(bla string, opts ...Option) {
+ o := defaultOpts().ApplyOptions(opts)
+
+ // Call "downstream"
+ SomeCompositeOperation(bla, opts...)
+}
+
+func SomeCompositeOperation(bla string, opts ...Option) {
+ o := defaultExtOpts().ApplyOptionsToExt(opts)
+}
+
+func defaultOpts() *Options {
+ return &Options{"abc", nil}
+}
+
+type Options struct {
+ Foo string
+ Bar *bool
+}
+
+func (o *Options) GetOptions() *Options {return o}
+func (o *Options) ApplyTo(t OptionsTarget) {
+ target := t.GetOptions()
+ if len(o.Foo) != 0 {
+ target.Foo = o.Foo
+ }
+ if o.Bar != nil {
+ target.Bar = o.Bar
+ }
+}
+func (o *Options) ApplyOptions(opts []Option) *Options {
+ for _, opt := range opts {
+ opt.ApplyTo(o)
+ }
+ return o
+}
+
+func defaultExtOpts() *ExtOptions {
+ return &ExtOptions{
+		OptionsTarget: defaultOpts(),
+ Baz: 1,
+ }
+}
+
+type ExtOptions struct {
+ OptionsTarget
+ Baz int64
+}
+
+func (o *ExtOptions) GetExtOptions() *ExtOptions {return o}
+func (o *ExtOptions) ApplyTo(t OptionsTarget) {
+ ext, ok := t.(ExtOptionsTarget)
+ if !ok {
+ return
+ }
+ target := ext.GetExtOptions()
+ if o.Baz != 0 {
+ target.Baz = o.Baz
+ }
+}
+func (o *ExtOptions) ApplyOptionsToExt(opts []Option) *ExtOptions {
+ for _, opt := range opts {
+ opt.ApplyTo(o)
+ }
+ return o
+}
+
+type Option interface {
+ ApplyTo(OptionsTarget)
+}
+type OptionsTarget interface {
+ GetOptions() *Options
+ // ApplyOptions(opts []Option) *Options
+}
+type ExtOptionsTarget interface {
+ OptionsTarget
+ GetExtOptions() *ExtOptions
+ // ApplyOptionsToExt(opts []Option) *ExtOptions
+}
+*/
+
+// DefaultReadMaxFrameCount specifies the default maximum number of frames that can be read by a Reader.
+const DefaultReadMaxFrameCount = 1024
+
+type singleReaderOptions struct{ SingleOptions }
+type singleWriterOptions struct{ SingleOptions }
+type readerOptions struct{ Options }
+type writerOptions struct{ Options }
+type recognizingReaderOptions struct{ RecognizingOptions }
+type recognizingWriterOptions struct{ RecognizingOptions }
+
+func defaultSingleReaderOptions() *singleReaderOptions {
+ return &singleReaderOptions{
+ SingleOptions: SingleOptions{
+ MaxFrameSize: limitedio.DefaultMaxReadSize,
+ Sanitizer: sanitize.NewJSONYAML(),
+ },
+ }
+}
+
+func defaultSingleWriterOptions() *singleWriterOptions {
+ return &singleWriterOptions{
+ SingleOptions: SingleOptions{
+ MaxFrameSize: limitedio.Infinite,
+ Sanitizer: sanitize.NewJSONYAML(),
+ },
+ }
+}
+
+func defaultReaderOptions() *readerOptions {
+ return &readerOptions{
+ Options: Options{
+ SingleOptions: defaultSingleReaderOptions().SingleOptions,
+ MaxFrameCount: DefaultReadMaxFrameCount,
+ },
+ }
+}
+
+func defaultWriterOptions() *writerOptions {
+ return &writerOptions{
+ Options: Options{
+ SingleOptions: defaultSingleWriterOptions().SingleOptions,
+ MaxFrameCount: limitedio.Infinite,
+ },
+ }
+}
+
+func defaultRecognizingReaderOptions() *recognizingReaderOptions {
+ return &recognizingReaderOptions{
+ RecognizingOptions: RecognizingOptions{
+ Options: defaultReaderOptions().Options,
+ Recognizer: content.NewJSONYAMLContentTypeRecognizer(),
+ },
+ }
+}
+
+func defaultRecognizingWriterOptions() *recognizingWriterOptions {
+ return &recognizingWriterOptions{
+ RecognizingOptions: RecognizingOptions{
+ Options: defaultWriterOptions().Options,
+ Recognizer: content.NewJSONYAMLContentTypeRecognizer(),
+ },
+ }
+}
+
+type SingleOptions struct {
+	// MaxFrameSize specifies the maximum allowed frame size that can be read and returned.
+	// Must be a positive integer. Defaults to limitedio.DefaultMaxReadSize for readers
+	// and limitedio.Infinite for writers.
+ MaxFrameSize limitedio.Limit
+ // Sanitizer configures the sanitizer that should be used for sanitizing the frames.
+ Sanitizer sanitize.Sanitizer
+ // TODO: Experiment
+ //MetadataOptions []metadata.HeaderOption
+}
+
+func (o SingleOptions) applyToSingle(target *SingleOptions) {
+ if o.MaxFrameSize != 0 {
+ target.MaxFrameSize = o.MaxFrameSize
+ }
+ if o.Sanitizer != nil {
+ target.Sanitizer = o.Sanitizer
+ }
+ /*if len(o.MetadataOptions) != 0 {
+ target.MetadataOptions = append(target.MetadataOptions, o.MetadataOptions...)
+ }*/
+}
+
+type Options struct {
+ SingleOptions
+
+	// MaxFrameCount specifies the maximum number of successful frames that can be read or written
+	// using a Reader or Writer. This means that e.g. empty frames after sanitation are NOT
+	// counted as a frame in this context. When reading, there can be a maximum of 10*MaxFrameCount
+	// frames in total (including failed and empty). Must be a positive integer. Defaults to
+	// DefaultReadMaxFrameCount for readers and limitedio.Infinite for writers.
+ MaxFrameCount limitedio.Limit
+}
+
+func (o Options) applyTo(target *Options) {
+ if o.MaxFrameCount != 0 {
+ target.MaxFrameCount = o.MaxFrameCount
+ }
+ o.applyToSingle(&target.SingleOptions)
+}
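+
+// A minimal sketch overriding the defaults (assuming a content.Reader r, and
+// that integer literals convert to limitedio.Limit):
+//
+//	fr := NewYAMLReader(r, Options{
+//		SingleOptions: SingleOptions{MaxFrameSize: 3 * 1024 * 1024},
+//		MaxFrameCount: 64,
+//	})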
+
+type RecognizingOptions struct {
+ Options
+
+ Recognizer content.ContentTypeRecognizer
+}
+
+func (o RecognizingOptions) applyToRecognizing(target *RecognizingOptions) {
+ if o.Recognizer != nil {
+ target.Recognizer = o.Recognizer
+ }
+ o.applyTo(&target.Options)
+}
+
+type SingleReaderOption interface {
+ ApplyToSingleReader(target *singleReaderOptions)
+}
+
+type SingleWriterOption interface {
+ ApplyToSingleWriter(target *singleWriterOptions)
+}
+
+type ReaderOption interface {
+ ApplyToReader(target *readerOptions)
+}
+
+type WriterOption interface {
+ ApplyToWriter(target *writerOptions)
+}
+
+type RecognizingReaderOption interface {
+ ApplyToRecognizingReader(target *recognizingReaderOptions)
+}
+
+type RecognizingWriterOption interface {
+ ApplyToRecognizingWriter(target *recognizingWriterOptions)
+}
+
+/*
+TODO: Is this needed?
+func WithMetadata(mopts ...metadata.HeaderOption) SingleOptions {
+ return SingleOptions{MetadataOptions: mopts}
+}*/
diff --git a/pkg/frame/options_boilerplate.go b/pkg/frame/options_boilerplate.go
new file mode 100644
index 00000000..097421f4
--- /dev/null
+++ b/pkg/frame/options_boilerplate.go
@@ -0,0 +1,114 @@
+package frame
+
+var (
+ _ SingleReaderOption = SingleOptions{}
+ _ SingleWriterOption = SingleOptions{}
+ _ ReaderOption = SingleOptions{}
+ _ WriterOption = SingleOptions{}
+ _ RecognizingReaderOption = SingleOptions{}
+ _ RecognizingWriterOption = SingleOptions{}
+
+ _ SingleReaderOption = Options{}
+ _ SingleWriterOption = Options{}
+ _ ReaderOption = Options{}
+ _ WriterOption = Options{}
+ _ RecognizingReaderOption = Options{}
+ _ RecognizingWriterOption = Options{}
+
+ _ SingleReaderOption = RecognizingOptions{}
+ _ SingleWriterOption = RecognizingOptions{}
+ _ ReaderOption = RecognizingOptions{}
+ _ WriterOption = RecognizingOptions{}
+ _ RecognizingReaderOption = RecognizingOptions{}
+ _ RecognizingWriterOption = RecognizingOptions{}
+)
+
+func (o SingleOptions) ApplyToSingleReader(target *singleReaderOptions) {
+ o.applyToSingle(&target.SingleOptions)
+}
+
+func (o SingleOptions) ApplyToSingleWriter(target *singleWriterOptions) {
+ o.applyToSingle(&target.SingleOptions)
+}
+
+func (o SingleOptions) ApplyToReader(target *readerOptions) {
+ o.applyToSingle(&target.SingleOptions)
+}
+
+func (o SingleOptions) ApplyToWriter(target *writerOptions) {
+ o.applyToSingle(&target.SingleOptions)
+}
+
+func (o SingleOptions) ApplyToRecognizingReader(target *recognizingReaderOptions) {
+ o.applyToSingle(&target.SingleOptions)
+}
+
+func (o SingleOptions) ApplyToRecognizingWriter(target *recognizingWriterOptions) {
+ o.applyToSingle(&target.SingleOptions)
+}
+
+func (o Options) ApplyToReader(target *readerOptions) {
+ o.applyTo(&target.Options)
+}
+
+func (o Options) ApplyToWriter(target *writerOptions) {
+ o.applyTo(&target.Options)
+}
+
+func (o Options) ApplyToRecognizingReader(target *recognizingReaderOptions) {
+ o.applyTo(&target.Options)
+}
+
+func (o Options) ApplyToRecognizingWriter(target *recognizingWriterOptions) {
+ o.applyTo(&target.Options)
+}
+
+func (o RecognizingOptions) ApplyToRecognizingReader(target *recognizingReaderOptions) {
+ o.applyToRecognizing(&target.RecognizingOptions)
+}
+
+func (o RecognizingOptions) ApplyToRecognizingWriter(target *recognizingWriterOptions) {
+ o.applyToRecognizing(&target.RecognizingOptions)
+}
+
+func (o *singleReaderOptions) applyOptions(opts []SingleReaderOption) *singleReaderOptions {
+ for _, opt := range opts {
+ opt.ApplyToSingleReader(o)
+ }
+ return o
+}
+
+func (o *singleWriterOptions) applyOptions(opts []SingleWriterOption) *singleWriterOptions {
+ for _, opt := range opts {
+ opt.ApplyToSingleWriter(o)
+ }
+ return o
+}
+
+func (o *readerOptions) applyOptions(opts []ReaderOption) *readerOptions {
+ for _, opt := range opts {
+ opt.ApplyToReader(o)
+ }
+ return o
+}
+
+func (o *writerOptions) applyOptions(opts []WriterOption) *writerOptions {
+ for _, opt := range opts {
+ opt.ApplyToWriter(o)
+ }
+ return o
+}
+
+func (o *recognizingReaderOptions) applyOptions(opts []RecognizingReaderOption) *recognizingReaderOptions {
+ for _, opt := range opts {
+ opt.ApplyToRecognizingReader(o)
+ }
+ return o
+}
+
+func (o *recognizingWriterOptions) applyOptions(opts []RecognizingWriterOption) *recognizingWriterOptions {
+ for _, opt := range opts {
+ opt.ApplyToRecognizingWriter(o)
+ }
+ return o
+}
diff --git a/pkg/frame/options_test.go b/pkg/frame/options_test.go
new file mode 100644
index 00000000..73c05271
--- /dev/null
+++ b/pkg/frame/options_test.go
@@ -0,0 +1,153 @@
+package frame
+
+/*
+func compareOptions(t *testing.T, name string, got, want interface{}) {
+ // We want to include the unexported tracer field when comparing TracerOptions, hence use reflect.DeepEqual
+ // for the comparison
+ opt := cmp.Comparer(func(x, y tracing.TracerOptions) bool {
+ return reflect.DeepEqual(x, y)
+ })
+ // Report error with diff if not equal
+ if !cmp.Equal(got, want, opt) {
+ t.Errorf("%s: got vs want: %s", name, cmp.Diff(got, want, opt))
+ }
+}
+
+func TestApplyReaderOptions(t *testing.T) {
+ defaultWithMutation := func(apply func(*ReaderOptions)) *ReaderOptions {
+ o := defaultReaderOpts()
+ apply(o)
+ return o
+ }
+ tests := []struct {
+ name string
+ opts []ReaderOption
+ fromDefault bool
+ want *ReaderOptions
+ }{
+ {
+ name: "simple defaults",
+ fromDefault: true,
+ want: defaultReaderOpts(),
+ },
+ {
+ name: "MaxFrameSize: apply",
+ opts: []ReaderOption{&ReaderWriterOptions{MaxFrameSize: 1234}},
+ want: &ReaderOptions{ReaderWriterOptions: ReaderWriterOptions{MaxFrameSize: 1234}},
+ },
+ {
+ name: "MaxFrameSize: override default",
+ opts: []ReaderOption{&ReaderWriterOptions{MaxFrameSize: 1234}},
+ fromDefault: true,
+ want: defaultWithMutation(func(ro *ReaderOptions) {
+ ro.MaxFrameSize = 1234
+ }),
+ },
+ {
+ name: "MaxFrameSize: zero value has no effect",
+ opts: []ReaderOption{&ReaderWriterOptions{MaxFrameSize: 0}},
+ fromDefault: true,
+ want: defaultReaderOpts(),
+ },
+ {
+ name: "MaxFrameSize: latter overrides earlier, if set",
+ opts: []ReaderOption{
+ &ReaderWriterOptions{MaxFrameSize: 1234},
+ &ReaderWriterOptions{MaxFrameSize: 4321},
+ &ReaderWriterOptions{MaxFrameSize: 0},
+ },
+ want: &ReaderOptions{ReaderWriterOptions: ReaderWriterOptions{MaxFrameSize: 4321}},
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var from *ReaderOptions
+ if tt.fromDefault {
+ from = defaultReaderOpts()
+ } else {
+ from = &ReaderOptions{}
+ }
+
+ got := from.ApplyOptions(tt.opts)
+ compareOptions(t, "TestApplyReaderOptions", got, tt.want)
+ })
+ }
+}
+
+func TestApplyReaderWriterOptions(t *testing.T) {
+ defReadWithMutation := func(apply func(*ReaderOptions)) *ReaderOptions {
+ o := defaultReaderOpts()
+ apply(o)
+ return o
+ }
+ defWriteWithMutation := func(apply func(*WriterOptions)) *WriterOptions {
+ o := defaultWriterOpts()
+ apply(o)
+ return o
+ }
+ barTracer := otel.GetTracerProvider().Tracer("bar")
+ tests := []struct {
+ name string
+ opts []ReaderWriterOption
+ fromDefault bool
+ wantReader *ReaderOptions
+ wantWriter *WriterOptions
+ }{
+ {
+ name: "simple defaults",
+ fromDefault: true,
+ wantReader: defaultReaderOpts(),
+ wantWriter: defaultWriterOpts(),
+ },
+ {
+ name: "WithTracerOptions: Set Tracer.Name",
+ fromDefault: true,
+ opts: []ReaderWriterOption{WithTracerOptions(tracing.TracerOptions{Name: "foo"})},
+ wantReader: defReadWithMutation(func(ro *ReaderOptions) {
+ ro.Tracer.Name = "foo"
+ }),
+ wantWriter: defWriteWithMutation(func(wo *WriterOptions) {
+ wo.Tracer.Name = "foo"
+ }),
+ },
+ {
+ name: "WithTracerOptions: Set Tracer",
+ fromDefault: true,
+ opts: []ReaderWriterOption{WithTracerOptions(tracing.WithTracer(barTracer))},
+ wantReader: defReadWithMutation(func(ro *ReaderOptions) {
+ // The tracer field is private, hence we need to configure it like this
+ tracing.WithTracer(barTracer).ApplyToTracer(&ro.Tracer)
+ }),
+ wantWriter: defWriteWithMutation(func(wo *WriterOptions) {
+ // The tracer field is private, hence we need to configure it like this
+ tracing.WithTracer(barTracer).ApplyToTracer(&wo.Tracer)
+ }),
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ var fromReader *ReaderOptions
+ var fromWriter *WriterOptions
+ if tt.fromDefault {
+ fromReader = defaultReaderOpts()
+ fromWriter = defaultWriterOpts()
+ } else {
+ fromReader = &ReaderOptions{}
+ fromWriter = &WriterOptions{}
+ }
+
+ readOpts := []ReaderOption{}
+ writeOpts := []WriterOption{}
+ for _, opt := range tt.opts {
+ readOpts = append(readOpts, opt)
+ writeOpts = append(writeOpts, opt)
+ }
+
+ gotReader := fromReader.ApplyOptions(readOpts)
+ gotWriter := fromWriter.ApplyOptions(writeOpts)
+ compareOptions(t, "TestApplyReaderWriterOptions", gotReader, tt.wantReader)
+ compareOptions(t, "TestApplyReaderWriterOptions", gotWriter, tt.wantWriter)
+ })
+ }
+}*/
diff --git a/pkg/frame/reader.go b/pkg/frame/reader.go
new file mode 100644
index 00000000..b2800d32
--- /dev/null
+++ b/pkg/frame/reader.go
@@ -0,0 +1,113 @@
+package frame
+
+import (
+ "context"
+ "sync"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/frame/sanitize"
+ "github.com/weaveworks/libgitops/pkg/tracing"
+ "github.com/weaveworks/libgitops/pkg/util/limitedio"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// newHighlevelReader takes a "low-level" Reader (like *streamingReader or *singleReader),
+// and implements higher-level logic like proper closing, mutex locking and tracing.
+func newHighlevelReader(r Reader, o *readerOptions) Reader {
+ return &highlevelReader{
+ read: r,
+ readMu: &sync.Mutex{},
+ opts: o,
+ maxTotalFrames: limitedio.Limit(o.MaxFrameCount * 10),
+ }
+}
+
+// highlevelReader wraps a low-level Reader with mutex locking, proper close
+// logic, and trace span initiation. On top of that it records extra tracing
+// context in ReadFrame.
+type highlevelReader struct {
+ read Reader
+ // readMu guards read.ReadFrame
+ readMu *sync.Mutex
+
+ opts *readerOptions
+ // maxTotalFrames is set to opts.MaxFrameCount * 10
+ maxTotalFrames limitedio.Limit
+	// successfulFrameCount counts the number of successful frames read
+	successfulFrameCount int64
+	// totalFrameCount counts the total number of frames read (including empty and failed ones)
+ totalFrameCount int64
+}
+
+func (r *highlevelReader) ReadFrame(ctx context.Context) ([]byte, error) {
+ // Make sure we have access to the underlying resource
+ r.readMu.Lock()
+ defer r.readMu.Unlock()
+
+ var frame []byte
+ err := tracing.FromContext(ctx, r).
+ TraceFunc(ctx, "ReadFrame", func(ctx context.Context, span trace.Span) error {
+
+			// Refuse to read more than the maximum number of successful frames
+ if r.opts.MaxFrameCount.IsLessThan(r.successfulFrameCount) {
+ return ErrFrameCountOverflow(r.opts.MaxFrameCount)
+ }
+
+ // Call the underlying reader
+ var err error
+ frame, err = r.readFrame(ctx)
+ if err != nil {
+ return err
+ }
+
+ // Record how large the frame is, and its content for debugging
+ span.SetAttributes(content.SpanAttrByteContent(frame)...)
+ return nil
+ }).RegisterCustom(content.SpanRegisterReadError)
+ // SpanRegisterReadError registers io.EOF as an "event", and other errors as "unknown errors" in the trace
+ if err != nil {
+ return nil, err
+ }
+ return frame, nil
+}
+
+func (r *highlevelReader) readFrame(ctx context.Context) ([]byte, error) {
+ // Ensure the total number of frames doesn't overflow
+ // TODO: Should this be LT or LTE?
+ if r.maxTotalFrames.IsLessThanOrEqual(r.totalFrameCount) {
+ return nil, ErrFrameCountOverflow(r.maxTotalFrames)
+ }
+	// Read the frame and increase the total frame counter.
+	// At the moment this does not forward the same ReadFrameResult instance,
+	// but that could be done in the future if needed, e.g. if the underlying
+	// Reader returned an interface with more methods than the default ones.
+ frame, err := r.read.ReadFrame(ctx)
+	r.totalFrameCount++
+ if err != nil {
+ return nil, err
+ }
+
+ // Sanitize the frame.
+ frame, err = sanitize.IfSupported(ctx, r.opts.Sanitizer, r.ContentType(), frame)
+ if err != nil {
+ return nil, err
+ }
+
+ // If it's empty, read the next frame automatically
+ if len(frame) == 0 {
+ return r.readFrame(ctx)
+ }
+
+ // Otherwise, if it's non-empty, return it and increase the "successful" counter
+	r.successfulFrameCount++
+	// If the frame count now overflows, return an ErrFrameCountOverflow
+ if r.opts.MaxFrameCount.IsLessThan(r.successfulFrameCount) {
+ return nil, ErrFrameCountOverflow(r.opts.MaxFrameCount)
+ }
+ return frame, nil
+}
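+
+// To illustrate the counting above (numbers taken from the tests): with
+// MaxFrameCount == 1 the reader accepts at most one successful (non-empty)
+// frame and at most 10 total reads from the underlying reader, so a stream of
+// ten "---\n" separators followed by a document overflows with
+// ErrFrameCountOverflow even though it contains only one non-empty frame.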
+
+func (r *highlevelReader) ContentType() content.ContentType { return r.read.ContentType() }
+func (r *highlevelReader) Close(ctx context.Context) error { return closeWithTrace(ctx, r.read, r) }
+func (r *highlevelReader) ContentMetadata() content.Metadata { return r.read.ContentMetadata() }
diff --git a/pkg/frame/reader_factory.go b/pkg/frame/reader_factory.go
new file mode 100644
index 00000000..51e0fb12
--- /dev/null
+++ b/pkg/frame/reader_factory.go
@@ -0,0 +1,74 @@
+package frame
+
+import (
+ "context"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/util/limitedio"
+)
+
+func DefaultFactory() Factory { return defaultFactory{} }
+
+var internalFactoryVar = DefaultFactory()
+
+type defaultFactory struct{}
+
+func (defaultFactory) NewReader(ct content.ContentType, r content.Reader, opts ...ReaderOption) Reader {
+ o := defaultReaderOptions().applyOptions(opts)
+
+ var lowlevel Reader
+ switch ct {
+ case content.ContentTypeYAML:
+ lowlevel = newYAMLReader(r, o)
+ case content.ContentTypeJSON:
+ lowlevel = newJSONReader(r, o)
+ default:
+ return newErrReader(content.ErrUnsupportedContentType(ct), "", r.ContentMetadata())
+ }
+ return newHighlevelReader(lowlevel, o)
+}
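+
+// A hedged usage sketch, assuming the FromYAMLBytes convenience constructor
+// (exercised in utils_test.go) wraps this factory method, given some
+// ctx context.Context:
+//
+//	r := FromYAMLBytes([]byte("a: 1\n---\nb: 2\n"))
+//	frame1, _ := r.ReadFrame(ctx) // "a: 1\n"
+//	frame2, _ := r.ReadFrame(ctx) // "b: 2\n"
+//	_, err := r.ReadFrame(ctx)    // err == io.EOF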
+
+func (defaultFactory) NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader {
+ o := defaultSingleReaderOptions().applyOptions(opts)
+
+ return newHighlevelReader(newSingleReader(r, ct, o), &readerOptions{
+		// Note: The MaxFrameCount == Infinite here makes the singleReader responsible
+		// for counting how many frames have been read (it returns io.EOF after the first one)
+ Options: Options{SingleOptions: o.SingleOptions, MaxFrameCount: limitedio.Infinite},
+ })
+}
+
+func (f defaultFactory) NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader {
+ o := defaultRecognizingReaderOptions().applyOptions(opts)
+
+ // Recognize the content type using the given recognizer
+ r, ct, err := content.NewRecognizingReader(ctx, r, o.Recognizer)
+ if err != nil {
+ return newErrReader(err, "", r.ContentMetadata())
+ }
+ // Re-use the logic of the "main" Reader constructor; validate ct there
+ return f.NewReader(ct, r, o)
+}
+
+func (defaultFactory) SupportedContentTypes() content.ContentTypes {
+ return []content.ContentType{content.ContentTypeYAML, content.ContentTypeJSON}
+}
+
+func newErrReader(err error, ct content.ContentType, meta content.Metadata) Reader {
+ return &errReader{
+ ct,
+ meta.ToContainer(),
+ &nopCloser{},
+ err,
+ }
+}
+
+// errReader always returns an error
+type errReader struct {
+ content.ContentTyped
+ content.MetadataContainer
+ Closer
+ err error
+}
+
+func (r *errReader) ReadFrame(context.Context) ([]byte, error) { return nil, r.err }
diff --git a/pkg/frame/reader_factory_test.go b/pkg/frame/reader_factory_test.go
new file mode 100644
index 00000000..f0604253
--- /dev/null
+++ b/pkg/frame/reader_factory_test.go
@@ -0,0 +1,60 @@
+package frame
+
+/*var (
+ customErr = errors.New("custom")
+ customErrIoReadCloser = errIoReadCloser(customErr)
+)*/
+
+/*TODO
+func TestNewReader_Unrecognized(t *testing.T) {
+ fr := NewReader(FramingType("doesnotexist"), customErrIoReadCloser)
+ ctx := context.Background()
+ frame, err := fr.ReadFrame(ctx)
+ assert.ErrorIs(t, err, ErrUnsupportedFramingType)
+ assert.Len(t, frame, 0)
+}*/
+
+/*func Test_toReadCloser(t *testing.T) {
+ tmp := t.TempDir()
+ f, err := os.Create(filepath.Join(tmp, "toReadCloser.txt"))
+ require.Nil(t, err)
+ defer f.Close()
+
+ tests := []struct {
+ name string
+ r io.Reader
+ wantHasCloser bool
+ }{
+ {
+ name: "*bytes.Reader",
+ r: bytes.NewReader([]byte("foo")),
+ wantHasCloser: false,
+ },
+ {
+ name: "*os.File",
+ r: f,
+ wantHasCloser: true,
+ },
+ {
+ name: "os.Stdout",
+ r: os.Stdout,
+ wantHasCloser: false,
+ },
+ {
+ name: "",
+ r: errIoReadCloser(nil),
+ wantHasCloser: true,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ gotRc, gotHasCloser := toReadCloser(tt.r)
+ wantRc, _ := tt.r.(io.ReadCloser)
+ if !tt.wantHasCloser {
+ wantRc = io.NopCloser(tt.r)
+ }
+ assert.Equal(t, wantRc, gotRc)
+ assert.Equal(t, tt.wantHasCloser, gotHasCloser)
+ })
+ }
+}*/
diff --git a/pkg/frame/reader_streaming.go b/pkg/frame/reader_streaming.go
new file mode 100644
index 00000000..86efddbd
--- /dev/null
+++ b/pkg/frame/reader_streaming.go
@@ -0,0 +1,115 @@
+package frame
+
+import (
+ "context"
+ "errors"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/util/limitedio"
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+ "k8s.io/apimachinery/pkg/runtime/serializer/streaming"
+)
+
+func newYAMLReader(r content.Reader, o *readerOptions) Reader {
+ // json.YAMLFramer.NewFrameReader takes care of the actual YAML framing logic
+ maxFrameSizeInt, err := o.MaxFrameSize.Int()
+ if err != nil {
+ return newErrReader(err, "", r.ContentMetadata())
+ }
+ r = r.Wrap(func(underlying io.ReadCloser) io.Reader {
+ return newK8sYAMLReader(underlying, maxFrameSizeInt)
+ })
+
+ // Mark the content type as YAML
+ r.ContentMetadata().Apply(content.WithContentType(content.ContentTypeYAML))
+
+ return newStreamingReader(content.ContentTypeYAML, r, o.MaxFrameSize)
+}
+
+// newJSONReader creates a "low-level" JSON Reader from the given io.ReadCloser.
+func newJSONReader(r content.Reader, o *readerOptions) Reader {
+ // json.Framer.NewFrameReader takes care of the actual JSON framing logic
+ r = r.Wrap(func(underlying io.ReadCloser) io.Reader {
+ return json.Framer.NewFrameReader(underlying)
+ })
+
+ // Mark the content type as JSON
+ r.ContentMetadata().Apply(content.WithContentType(content.ContentTypeJSON))
+
+ return newStreamingReader(content.ContentTypeJSON, r, o.MaxFrameSize)
+}
+
+// newStreamingReader makes a generic Reader that reads from an io.ReadCloser returned
+// from Kubernetes' runtime.Framer.NewFrameReader, in exactly the way
+// k8s.io/apimachinery/pkg/runtime/serializer/streaming implements this.
+// On a high-level, it means that many small Read(p []byte) calls are made as long as
+// io.ErrShortBuffer is returned. When err == nil is returned from rc, we know that we're
+// at the end of a frame, and at that point the frame is returned.
+//
+// Note: This Reader is a so-called "low-level" one. It doesn't do tracing, mutex locking, or
+// proper closing logic. It must be wrapped by a composite, high-level Reader like highlevelReader.
+func newStreamingReader(ct content.ContentType, r content.Reader, maxFrameSize limitedio.Limit) Reader {
+ // Limit the amount of bytes read from the content.Reader
+ r, resetCounter := content.WrapLimited(r, maxFrameSize)
+	// Wrap the reader in a segment reader that reads one frame (segment) at a time
+ cr := r.WrapSegment(func(rc io.ReadCloser) content.RawSegmentReader {
+ return newK8sStreamingReader(rc, maxFrameSize.Int64())
+ })
+
+ return &streamingReader{
+ // Clone the metadata and expose it
+ // TODO: Maybe ReaderOptions should allow changing it?
+ MetadataContainer: r.ContentMetadata().Clone().ToContainer(),
+ ContentTyped: ct,
+ resetCounter: resetCounter,
+ cr: cr,
+ maxFrameSize: maxFrameSize,
+ }
+}
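+
+// A minimal sketch (not part of this package) of the read pattern described
+// above, where fr is assumed to be the io.Reader returned by
+// runtime.Framer.NewFrameReader:
+//
+//	buf := make([]byte, 1024)
+//	var frame []byte
+//	for {
+//		n, err := fr.Read(buf)
+//		frame = append(frame, buf[:n]...)
+//		if err == io.ErrShortBuffer {
+//			continue // the current frame continues beyond buf; keep reading
+//		}
+//		// err == nil means the frame is complete; io.EOF or any other
+//		// error ends the stream
+//		break
+//	}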
+
+// streamingReader is a small "conversion" struct that implements the Reader interface for a
+// given k8sStreamingReader. When reader_streaming_k8s.go is upstreamed, we can replace the
+// temporary k8sStreamingReader interface with a "proper" Kubernetes one.
+type streamingReader struct {
+ content.MetadataContainer
+ content.ContentTyped
+ resetCounter content.ResetCounterFunc
+ cr content.SegmentReader
+ maxFrameSize limitedio.Limit
+}
+
+func (r *streamingReader) ReadFrame(ctx context.Context) ([]byte, error) {
+	// Read one frame from the underlying segment reader
+ frame, err := r.cr.WithContext(ctx).Read()
+ if err != nil {
+ // Transform streaming.ErrObjectTooLarge to a ErrFrameSizeOverflow, if returned.
+ return nil, mapError(err, errorMappings{
+ streaming.ErrObjectTooLarge: func() error {
+ return limitedio.ErrReadSizeOverflow(r.maxFrameSize)
+ },
+ })
+ }
+ // Reset the counter only when we have a successful frame
+ r.resetCounter()
+ return frame, nil
+}
+
+func (r *streamingReader) Close(ctx context.Context) error { return r.cr.WithContext(ctx).Close() }
+
+// mapError is a utility for mapping an "actual" error to a lazily-evaluated "desired" one.
+// Equality between the errorMappings' keys and err is defined by errors.Is.
+func mapError(err error, f errorMappings) error {
+ for target, mkErr := range f {
+ if errors.Is(err, target) {
+ return mkErr()
+ }
+ }
+ return err
+}
+
+// errorMappings maps actual errors to lazily-evaluated desired ones
+type errorMappings map[error]mkErrorFunc
+
+// mkErrorFunc lazily creates an error
+type mkErrorFunc func() error
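+
+// For example, a hypothetical mapping (the real one above maps
+// streaming.ErrObjectTooLarge to a frame-size overflow error):
+//
+//	err = mapError(err, errorMappings{
+//		io.ErrUnexpectedEOF: func() error { return io.EOF },
+//	})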
diff --git a/pkg/frame/reader_test.go b/pkg/frame/reader_test.go
new file mode 100644
index 00000000..32d4ab44
--- /dev/null
+++ b/pkg/frame/reader_test.go
@@ -0,0 +1,526 @@
+package frame
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/tracing"
+ "github.com/weaveworks/libgitops/pkg/util/compositeio"
+ "github.com/weaveworks/libgitops/pkg/util/limitedio"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/zap/zapcore"
+ "k8s.io/utils/pointer"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+func init() {
+ // Set up the global logger
+ log.SetLogger(zap.New(zap.ConsoleEncoder(func(ec *zapcore.EncoderConfig) {
+ ec.TimeKey = ""
+ }))) // zap.JSONEncoder()
+
+ err := tracing.NewBuilder().
+ //RegisterStdoutExporter(stdouttrace.WithWriter(io.Discard)).
+ RegisterInsecureJaegerExporter("").
+ //WithLogging(true).
+ InstallGlobally()
+ if err != nil {
+ fmt.Printf("failed to install tracing provider: %v\n", err)
+ os.Exit(1)
+ }
+}
+
+// TODO: Make sure that len(frame) == 0 when err != nil for the Writer.
+
+// TODO: Test the output traces more thoroughly, when there is a SpanProcessor that supports writing
+// relevant data to a file, and do matching between spans.
+
+// TODO: Make some 16M (or more) JSON/YAML files and show that these are readable (or not). That
+// doesn't test a case that isn't already covered by the unit tests below, but it would be a good
+// marker that this actually solves the right problem.
+
+// TODO: Maybe add some race-condition tests? The central place where mutexes are used is
+// highlevel{Reader,Writer}, so that'd be the place in that case.
+
+type testcase struct {
+ singleReadOpts []SingleReaderOption
+ singleWriteOpts []SingleWriterOption
+	// single{Read,Write}Opts are automatically cast to {Reader,Writer}Options if possible
+ // and included in readOpts and writeOpts; no need to specify twice
+ readOpts []ReaderOption
+ writeOpts []WriterOption
+	// {read,write}Opts are automatically cast to Recognizing{Reader,Writer}Options if possible
+ // and included in recognizing{Read,Write}Opts; no need to specify twice
+ recognizingReadOpts []RecognizingReaderOption
+ recognizingWriteOpts []RecognizingWriterOption
+
+ name string
+ testdata []testdata
+	// Reader.ReadFrame will be called len(readResults) times. If an err == nil return is expected, just put
+ // nil in the error slice. Similarly for Writer.WriteFrame and writeResults.
+ // Note that len(readResults) >= len(frames) and len(writeResults) >= len(frames) must hold.
+ // By issuing more reads or writes than there are frames, one can check the error behavior
+ readResults []error
+ writeResults []error
+	// If closeWriterIdx or closeReaderIdx is non-nil, the Writer/Reader will be closed after the
+	// write/read at the specified index. closeWriterErr and closeReaderErr can be used to check
+	// the error returned by the close call.
+ closeWriterIdx *int64
+ closeWriterErr error
+ //expectWriterClosed bool
+ closeReaderIdx *int64
+ closeReaderErr error
+
+ //expectReaderCloser bool
+}
+
+type testdata struct {
+ ct content.ContentType
+ single, recognizing bool
+	// frames contains the individual frames of rawData, which in turn is the content of the underlying
+	// source/stream. If len(writeResults) == 0, there is no check that writing all frames
+	// in order produces the correct rawData. If len(readResults) == 0, there is no check
+	// that reading rawData produces the frames
+ rawData string
+ frames []string
+}
+
+const (
+ yamlSep = "---\n"
+ noNewlineYAML = `foobar: true`
+ testYAML = noNewlineYAML + "\n"
+ testYAMLlen = int64(len(testYAML))
+ messyYAMLP1 = `
+---
+
+---
+` + noNewlineYAML + `
+`
+ messyYAMLP2 = `
+
+---
+---
+` + noNewlineYAML + `
+---`
+ messyYAML = messyYAMLP1 + messyYAMLP2
+
+ testJSON = `{"foo":true}
+`
+ testJSONlen = int64(len(testJSON))
+ testJSON2 = `{"bar":"hello"}
+`
+ messyJSONP1 = `
+
+` + testJSON + `
+`
+ messyJSONP2 = `
+
+` + testJSON + `
+`
+ messyJSON = messyJSONP1 + messyJSONP2
+
+ otherCT = content.ContentType("other")
+ otherFrame = "('other'; 9)\n('bar'; true)"
+ otherFrameLen = int64(len(otherFrame))
+)
+
+func TestReader(t *testing.T) {
+ // Some tests depend on this
+ require.Equal(t, testYAMLlen, testJSONlen)
+ NewFactoryTester(t, defaultFactory{}).Test()
+ assert.Nil(t, tracing.ForceFlushGlobal(context.Background(), 0))
+}
+
+// TODO: Test that closing of Readers and Writers works
+
+var defaultTestCases = []testcase{
+ // Roundtrip cases
+ {
+ name: "simple roundtrip",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, frames: []string{testYAML}, rawData: yamlSep + testYAML},
+ {ct: content.ContentTypeJSON, frames: []string{testJSON}, rawData: testJSON},
+ },
+ writeResults: []error{nil, nil, nil, nil},
+ readResults: []error{nil, io.EOF, io.EOF, io.EOF},
+ },
+
+ {
+ name: "two-frame roundtrip with closed writer",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML},
+ {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2}, rawData: testJSON + testJSON2},
+ },
+ writeResults: []error{nil, nil, nil, nil},
+ readResults: []error{nil, nil, io.EOF, io.EOF},
+ },
+ // YAML newline addition
+ {
+ name: "YAML Read: a newline will be added",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, rawData: noNewlineYAML, frames: []string{testYAML}},
+ },
+ readResults: []error{nil, io.EOF},
+ },
+ {
+ name: "YAML Write: a newline will be added",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, frames: []string{noNewlineYAML}, rawData: yamlSep + testYAML},
+ },
+ writeResults: []error{nil},
+ },
+ // Empty frames
+ {
+ name: "Read: io.EOF when there are no non-empty frames",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, rawData: "---"},
+ {ct: content.ContentTypeYAML, rawData: "---\n"},
+ {ct: content.ContentTypeJSON, rawData: ""},
+ {ct: content.ContentTypeJSON, rawData: " \n "},
+ },
+ readResults: []error{io.EOF},
+ },
+ {
+ name: "Write: Empty sanitized frames aren't written",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, frames: []string{"---", "---\n", " \n--- \n---"}},
+ {ct: content.ContentTypeJSON, frames: []string{"", " \n ", " "}},
+ },
+ writeResults: []error{nil, nil, nil},
+ },
+ {
+ name: "Write: can write empty frames forever without errors",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML},
+ {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2}, rawData: testJSON + testJSON2},
+ },
+ writeResults: []error{nil, nil, nil, nil, nil},
+ readResults: []error{nil, nil, io.EOF},
+ },
+ // Sanitation
+ {
+ name: "YAML Read: a leading \\n--- will be ignored",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, rawData: "\n" + yamlSep + noNewlineYAML, frames: []string{testYAML}},
+ },
+ readResults: []error{nil, io.EOF},
+ },
+ {
+ name: "YAML Read: a leading --- will be ignored",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, rawData: yamlSep + noNewlineYAML, frames: []string{testYAML}},
+ },
+ readResults: []error{nil, io.EOF},
+ },
+ {
+ name: "Read: sanitize messy content",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, rawData: messyYAML, frames: []string{testYAML, testYAML}},
+ {ct: content.ContentTypeJSON, rawData: messyJSON, frames: []string{testJSON, testJSON}},
+ },
+ readResults: []error{nil, nil, io.EOF},
+ },
+ {
+ name: "Write: sanitize messy content",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, frames: []string{messyYAMLP1, messyYAMLP2}, rawData: yamlSep + testYAML + yamlSep + testYAML},
+ {ct: content.ContentTypeJSON, frames: []string{messyJSONP1, messyJSONP2}, rawData: testJSON + testJSON},
+ },
+ writeResults: []error{nil, nil},
+ },
+ // MaxFrameSize
+ {
+ name: "Read: the frame size is exactly within bounds, also enforce counter reset",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, rawData: yamlSep + testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}},
+ {ct: content.ContentTypeJSON, rawData: testJSON + testJSON, frames: []string{testJSON, testJSON}},
+ },
+ singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}},
+ readResults: []error{nil, nil, io.EOF},
+ },
+ {
+ name: "YAML Read: there is a newline before the initial ---, should sanitize",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, rawData: "\n" + yamlSep + testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}},
+ },
+ singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}},
+ readResults: []error{nil, nil, io.EOF},
+ },
+ {
+ name: "Read: the frame is out of bounds, on the same line",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, rawData: testYAML},
+ {ct: content.ContentTypeJSON, rawData: testJSON},
+ },
+ singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen - 2)}},
+ readResults: []error{&limitedio.ReadSizeOverflowError{}},
+ },
+ {
+ name: "YAML Read: the frame is out of bounds, but continues on the next line",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, rawData: testYAML + testYAML},
+ },
+ singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}},
+ readResults: []error{&limitedio.ReadSizeOverflowError{}},
+ },
+ {
+ name: "Read: first frame ok, then always frame overflow",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, rawData: testYAML + yamlSep + testYAML + testYAML, frames: []string{testYAML}},
+ {ct: content.ContentTypeJSON, rawData: testJSON + testJSON2, frames: []string{testJSON}},
+ },
+ singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}},
+ readResults: []error{nil, &limitedio.ReadSizeOverflowError{}, &limitedio.ReadSizeOverflowError{}, &limitedio.ReadSizeOverflowError{}},
+ },
+ {
+ name: "Write: the second frame is too large, ignore that, but allow writing smaller frames later",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML + testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML},
+ {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2, testJSON}, rawData: testJSON + testJSON},
+ },
+ singleWriteOpts: []SingleWriterOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}},
+ writeResults: []error{nil, &limitedio.ReadSizeOverflowError{}, nil},
+ },
+ // TODO: test negative limits too
+ {
+ name: "first frame ok, then Read => EOF and Write => nil consistently",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, frames: []string{testYAML}, rawData: yamlSep + testYAML},
+ {ct: content.ContentTypeJSON, frames: []string{testJSON}, rawData: testJSON},
+ },
+ readResults: []error{nil, io.EOF, io.EOF, io.EOF, io.EOF},
+ writeResults: []error{nil, nil, nil, nil, nil},
+ },
+ // MaxFrameCount
+ {
+ name: "Write: Don't allow writing more than a maximum amount of frames",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML},
+ {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON, testJSON}, rawData: testJSON + testJSON},
+ },
+ writeResults: []error{nil, nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}},
+ writeOpts: []WriterOption{&Options{MaxFrameCount: 2}},
+ },
+ {
+ name: "Read: Don't allow reading more than a maximum amount of successful frames",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML,
+ rawData: testYAML + yamlSep + testYAML + yamlSep + testYAML,
+ frames: []string{testYAML, testYAML}},
+ {ct: content.ContentTypeJSON,
+ rawData: testJSON + testJSON + testJSON,
+ frames: []string{testJSON, testJSON}},
+ },
+ readResults: []error{nil, nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}},
+ readOpts: []ReaderOption{&Options{MaxFrameCount: 2}},
+ },
+ {
+ name: "Read: Don't allow reading more than a maximum amount of successful frames, and 10x in total",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML,
+ rawData: strings.Repeat("\n"+yamlSep, 10) + testYAML},
+ },
+ readResults: []error{&FrameCountOverflowError{}, &FrameCountOverflowError{}},
+ readOpts: []ReaderOption{&Options{MaxFrameCount: 1}},
+ },
+ {
+ name: "Read: Allow reading up to the maximum amount of 10x the successful frames count",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML,
+ rawData: strings.Repeat("\n"+yamlSep, 9) + testYAML + yamlSep + yamlSep, frames: []string{testYAML}},
+ },
+ readResults: []error{nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}},
+ readOpts: []ReaderOption{&Options{MaxFrameCount: 1}},
+ },
+ {
+ name: "Read: Allow reading exactly that amount of successful frames, if then io.EOF",
+ testdata: []testdata{
+ {ct: content.ContentTypeYAML,
+ rawData: testYAML + yamlSep + testYAML,
+ frames: []string{testYAML, testYAML}},
+ {ct: content.ContentTypeJSON,
+ rawData: testJSON + testJSON,
+ frames: []string{testJSON, testJSON}},
+ },
+ readResults: []error{nil, nil, io.EOF, io.EOF},
+ readOpts: []ReaderOption{&Options{MaxFrameCount: 2}},
+ },
+ // Other Framing Types and Single
+ {
+ name: "Roundtrip: Allow reading other framing types for single reader, check overflows too",
+ testdata: []testdata{
+ {ct: otherCT, single: true, rawData: otherFrame, frames: []string{otherFrame}},
+ },
+ writeResults: []error{nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}, &FrameCountOverflowError{}},
+ readResults: []error{nil, io.EOF, io.EOF, io.EOF},
+ },
+ {
+ name: "Read: other framing type frame size is exactly within bounds",
+ testdata: []testdata{
+ {ct: otherCT, single: true, rawData: otherFrame, frames: []string{otherFrame}},
+ },
+ singleReadOpts: []SingleReaderOption{SingleOptions{MaxFrameSize: limitedio.Limit(otherFrameLen)}},
+ readResults: []error{nil, io.EOF},
+ },
+ {
+ name: "Read: other framing type frame size overflow",
+ testdata: []testdata{
+ {ct: otherCT, single: true, rawData: otherFrame},
+ },
+ singleReadOpts: []SingleReaderOption{SingleOptions{MaxFrameSize: limitedio.Limit(otherFrameLen - 1)}},
+ readResults: []error{&limitedio.ReadSizeOverflowError{}, io.EOF, io.EOF},
+ },
+ {
+ name: "Write: other framing type frame size overflow",
+ testdata: []testdata{
+ {ct: otherCT, single: true, frames: []string{otherFrame, otherFrame}},
+ },
+ singleWriteOpts: []SingleWriterOption{SingleOptions{MaxFrameSize: limitedio.Limit(otherFrameLen - 1)}},
+ writeResults: []error{&limitedio.ReadSizeOverflowError{}, &limitedio.ReadSizeOverflowError{}, nil},
+ },
+}
+
+func NewFactoryTester(t *testing.T, f Factory) *FactoryTester {
+ return &FactoryTester{
+ t: t,
+ factory: f,
+ cases: defaultTestCases,
+ }
+}
+
+type FactoryTester struct {
+ t *testing.T
+ factory Factory
+
+ cases []testcase
+}
+
+func (h *FactoryTester) Test() {
+ for _, c := range h.cases {
+ h.t.Run(c.name, func(t *testing.T) {
+ h.testRoundtripCase(t, &c)
+ })
+ }
+}
+
+func (h *FactoryTester) testRoundtripCase(t *testing.T, c *testcase) {
+ sropt := (&singleReaderOptions{}).applyOptions(c.singleReadOpts)
+ swopt := (&singleWriterOptions{}).applyOptions(c.singleWriteOpts)
+ ropt := (&readerOptions{}).applyOptions(c.readOpts)
+ wopt := (&writerOptions{}).applyOptions(c.writeOpts)
+
+ c.readOpts = append(c.readOpts, sropt)
+ c.recognizingReadOpts = append(c.recognizingReadOpts, sropt)
+ c.recognizingReadOpts = append(c.recognizingReadOpts, ropt)
+
+ c.writeOpts = append(c.writeOpts, swopt)
+ c.recognizingWriteOpts = append(c.recognizingWriteOpts, swopt)
+ c.recognizingWriteOpts = append(c.recognizingWriteOpts, wopt)
+
+ ctx := context.Background()
+ for i, data := range c.testdata {
+ subName := fmt.Sprintf("%d %s", i, data.ct)
+ t.Run(subName, func(t *testing.T) {
+ tr := tracing.TracerOptions{Name: fmt.Sprintf("%s %s", c.name, subName), UseGlobal: pointer.Bool(true)}
+ _ = tr.TraceFunc(ctx, "", func(ctx context.Context, _ trace.Span) error {
+ h.testRoundtripCaseContentType(t, ctx, c, &data)
+ return nil
+ }).Register()
+ })
+ }
+}
+
+func (h *FactoryTester) testRoundtripCaseContentType(t *testing.T, ctx context.Context, c *testcase, d *testdata) {
+ var buf bytes.Buffer
+
+ readCloseCounter := &recordingCloser{}
+ writeCloseCounter := &recordingCloser{}
+ cw := content.NewWriter(compositeio.WriteCloser(&buf, writeCloseCounter))
+ cr := content.NewReader(compositeio.ReadCloser(&buf, readCloseCounter))
+ var w Writer
+ if d.single && d.recognizing {
+ panic("cannot be both single and recognizing")
+ } else if d.single && !d.recognizing {
+ w = h.factory.NewSingleWriter(d.ct, cw, c.singleWriteOpts...)
+ } else if !d.single && d.recognizing {
+ w = h.factory.NewRecognizingWriter(cw, c.recognizingWriteOpts...)
+ } else {
+ w = h.factory.NewWriter(d.ct, cw, c.writeOpts...)
+ }
+ assert.Equalf(t, w.ContentType(), d.ct, "Writer.content.ContentType")
+
+ var r Reader
+ if d.single && d.recognizing {
+ panic("cannot be both single and recognizing")
+ } else if d.single && !d.recognizing {
+ r = h.factory.NewSingleReader(d.ct, cr, c.singleReadOpts...)
+ } else if !d.single && d.recognizing {
+ r = h.factory.NewRecognizingReader(ctx, cr, c.recognizingReadOpts...)
+ } else {
+ r = h.factory.NewReader(d.ct, cr, c.readOpts...)
+ }
+ assert.Equalf(t, r.ContentType(), d.ct, "Reader.content.ContentType")
+
+ // Write frames using the writer
+ for i, expected := range c.writeResults {
+ var frame []byte
+ // Only write a frame using the writer if one is supplied
+ if i < len(d.frames) {
+ frame = []byte(d.frames[i])
+ }
+
+ // Write the frame using the writer and check the error
+ got := w.WriteFrame(ctx, frame)
+ assert.ErrorIsf(t, got, expected, "Writer.WriteFrame err %d", i)
+
+ // If we should close the writer here, do it and check the expected error
+ if c.closeWriterIdx != nil && *c.closeWriterIdx == int64(i) {
+ assert.ErrorIsf(t, w.Close(ctx), c.closeWriterErr, "Writer.Close err %d", i)
+ }
+ }
+
+ assert.Equalf(t, 0, writeCloseCounter.count, "Writer should not be closed")
+
+ // Check that the written output was as expected, if writing is enabled
+ if len(c.writeResults) != 0 {
+ assert.Equalf(t, d.rawData, buf.String(), "Writer Output")
+ } else {
+ // If writing was not tested, make sure the buffer contains the raw data for reading
+ buf = *bytes.NewBufferString(d.rawData)
+ }
+
+ // Read frames using the reader
+ for i, expected := range c.readResults {
+ // Check the expected error
+ frame, err := r.ReadFrame(ctx)
+ assert.ErrorIsf(t, err, expected, "Reader.ReadFrame err %d", i)
+ // Only check the frame content if there's an expected frame
+ if i < len(d.frames) {
+ assert.Equalf(t, d.frames[i], string(frame), "Reader.ReadFrame frame %d", i)
+ }
+
+ // If we should close the reader here, do it and check the expected error
+ if c.closeReaderIdx != nil && *c.closeReaderIdx == int64(i) {
+ assert.ErrorIsf(t, r.Close(ctx), c.closeReaderErr, "Reader.Close err %d", i)
+ }
+ }
+ assert.Equalf(t, 0, readCloseCounter.count, "Reader should not be closed")
+}
+
+type recordingCloser struct {
+ count int
+}
+
+func (c *recordingCloser) Close() error {
+	c.count++
+ return nil
+}
diff --git a/pkg/serializer/comments/LICENSE b/pkg/frame/sanitize/comments/LICENSE
similarity index 100%
rename from pkg/serializer/comments/LICENSE
rename to pkg/frame/sanitize/comments/LICENSE
diff --git a/pkg/serializer/comments/comments.go b/pkg/frame/sanitize/comments/comments.go
similarity index 100%
rename from pkg/serializer/comments/comments.go
rename to pkg/frame/sanitize/comments/comments.go
diff --git a/pkg/serializer/comments/comments_test.go b/pkg/frame/sanitize/comments/comments_test.go
similarity index 99%
rename from pkg/serializer/comments/comments_test.go
rename to pkg/frame/sanitize/comments/comments_test.go
index 233feeec..dfd874bb 100644
--- a/pkg/serializer/comments/comments_test.go
+++ b/pkg/frame/sanitize/comments/comments_test.go
@@ -226,7 +226,8 @@ items:
- c
- b
`,
- }, {
+ },
+ {
name: "copy_item_comments_no_match",
from: `
apiVersion: apps/v1
diff --git a/pkg/serializer/comments/lost.go b/pkg/frame/sanitize/comments/lost.go
similarity index 100%
rename from pkg/serializer/comments/lost.go
rename to pkg/frame/sanitize/comments/lost.go
diff --git a/pkg/frame/sanitize/sanitize.go b/pkg/frame/sanitize/sanitize.go
new file mode 100644
index 00000000..71bebe82
--- /dev/null
+++ b/pkg/frame/sanitize/sanitize.go
@@ -0,0 +1,263 @@
+package sanitize
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "strings"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/frame/sanitize/comments"
+ "k8s.io/utils/pointer"
+ "sigs.k8s.io/kustomize/kyaml/kio"
+ "sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Sanitizer is an interface for sanitizing frames. Note that a sanitizer can only do
+// its work correctly if the given frame actually contains a single frame.
+type Sanitizer interface {
+	// Sanitize sanitizes the frame in a standardized way for the given
+	// ContentType. If the ContentType isn't known, the Sanitizer can choose between
+	// returning an ErrUnsupportedContentType error or just returning frame, nil unmodified.
+	// If ErrUnsupportedContentType is returned, the consumer probably won't be able to
+	// handle content types other than the default ones, which might not be desired.
+ //
+ // The returned frame should have len == 0 if it's considered empty.
+ Sanitize(ctx context.Context, ct content.ContentType, frame []byte) ([]byte, error)
+
+ content.ContentTypeSupporter
+}
+
+// NewJSONYAML returns a Sanitizer that implements frame sanitization for JSON and YAML.
+//
+// For YAML it removes unnecessary "---" separators, whitespace and newlines.
+// The YAML frame always ends with a newline, unless the sanitized YAML was an empty string, in which
+// case an empty string with len == 0 will be returned.
+//
+// For JSON it sanitizes the JSON frame by removing unnecessary spaces and newlines around it.
+func NewJSONYAML(opts ...JSONYAMLOption) Sanitizer {
+ return &defaultSanitizer{defaultJSONYAMLOptions().applyOptions(opts)}
+}
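+
+// A minimal usage sketch, given some ctx context.Context (the frame content
+// is illustrative; the behavior matches the tests in sanitize_test.go):
+//
+//	s := NewJSONYAML()
+//	out, err := s.Sanitize(ctx, content.ContentTypeYAML, []byte("---\nfoo: bar\n"))
+//	// out == []byte("foo: bar\n"), err == nil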
+
+func WithCompactIndent() JSONYAMLOption {
+ return WithSpacesIndent(0)
+}
+
+func WithSpacesIndent(spaces uint8) JSONYAMLOption {
+ i := strings.Repeat(" ", int(spaces))
+ return &jsonYAMLOptions{Indentation: &i}
+}
+
+func WithTabsIndent(tabs uint8) JSONYAMLOption {
+ i := strings.Repeat("\t", int(tabs))
+ return &jsonYAMLOptions{Indentation: &i}
+}
+
+func WithCompactSeqIndent() JSONYAMLOption {
+ return &jsonYAMLOptions{ForceSeqIndentStyle: yaml.CompactSequenceStyle}
+}
+
+func WithWideSeqIndent() JSONYAMLOption {
+ return &jsonYAMLOptions{ForceSeqIndentStyle: yaml.WideSequenceStyle}
+}
+
+func WithNoCommentsCopy() JSONYAMLOption {
+ return &jsonYAMLOptions{CopyComments: pointer.Bool(false)}
+}
+
+type JSONYAMLOption interface {
+ applyToJSONYAML(*jsonYAMLOptions)
+}
+
+type jsonYAMLOptions struct {
+ // Only applicable to JSON at the moment; YAML indentation config not supported
+ Indentation *string
+ // Only applicable to YAML; either yaml.CompactSequenceStyle or yaml.WideSequenceStyle
+ ForceSeqIndentStyle yaml.SequenceIndentStyle
+ // Only applicable to YAML; JSON doesn't support comments
+ CopyComments *bool
+ ClearEmptyFields [][]string
+ /*
+ TODO: ForceMapKeyOrder that can either be
+ - PreserveOrder (if unset) => preserves the order from the prior if given. no-op if no prior.
+ - Alphabetic => sorts all keys alphabetically
+ - None => don't preserve order from the prior; no-op
+ */
+}
+
+func defaultJSONYAMLOptions() *jsonYAMLOptions {
+	return &jsonYAMLOptions{
+		Indentation:  pointer.String(""),
+		CopyComments: pointer.Bool(true),
+		ClearEmptyFields: [][]string{
+			{"metadata", "creationTimestamp"},
+			{"status"},
+		},
+	}
+}
+
+func (o *jsonYAMLOptions) applyToJSONYAML(target *jsonYAMLOptions) {
+ if o.Indentation != nil {
+ target.Indentation = o.Indentation
+ }
+ if len(o.ForceSeqIndentStyle) != 0 {
+ target.ForceSeqIndentStyle = o.ForceSeqIndentStyle
+ }
+ if o.CopyComments != nil {
+ target.CopyComments = o.CopyComments
+ }
+ if o.ClearEmptyFields != nil {
+ target.ClearEmptyFields = o.ClearEmptyFields
+ }
+}
+
+func (o *jsonYAMLOptions) applyOptions(opts []JSONYAMLOption) *jsonYAMLOptions {
+ for _, opt := range opts {
+ opt.applyToJSONYAML(o)
+ }
+ return o
+}
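+
+// For instance (a sketch; values are illustrative), later options override
+// earlier ones only for the fields they actually set:
+//
+//	o := defaultJSONYAMLOptions().applyOptions([]JSONYAMLOption{
+//		WithSpacesIndent(2),
+//		WithNoCommentsCopy(), // leaves the Indentation set above intact
+//	})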
+
+type defaultSanitizer struct {
+ opts *jsonYAMLOptions
+}
+
+func (s *defaultSanitizer) Sanitize(ctx context.Context, ct content.ContentType, frame []byte) ([]byte, error) {
+ switch ct {
+ case content.ContentTypeYAML:
+ return s.handleYAML(ctx, frame)
+ case content.ContentTypeJSON:
+ return s.handleJSON(frame)
+ default:
+ // Just passthrough
+ return frame, nil
+ }
+}
+
+func (defaultSanitizer) SupportedContentTypes() content.ContentTypes {
+ return []content.ContentType{content.ContentTypeYAML, content.ContentTypeJSON}
+}
+
+var ErrTooManyFrames = errors.New("too many frames")
+
+// TODO: Make sure maps are alphabetically sorted, or match the prior
+// Can e.g. use https://github.com/kubernetes-sigs/kustomize/blob/master/kyaml/order/syncorder.go
+func (s *defaultSanitizer) handleYAML(ctx context.Context, frame []byte) ([]byte, error) {
+ // Get prior data, if any (from the context), that we'll use to copy comments over and
+ // infer the sequence indenting style.
+ priorData, hasPriorData := GetPriorData(ctx)
+
+ // Parse the current node
+ frameNodes, err := (&kio.ByteReader{
+ // TODO: Is this a bug in kyaml?
+ Reader: bytes.NewReader(append([]byte{'\n'}, frame...)),
+ DisableUnwrapping: true,
+ OmitReaderAnnotations: true,
+ }).Read()
+ if err != nil {
+ return nil, err
+ }
+ if len(frameNodes) == 0 {
+ return []byte{}, nil
+ } else if len(frameNodes) != 1 {
+ return nil, ErrTooManyFrames
+ }
+ frameNode := frameNodes[0]
+
+ if hasPriorData && s.opts.CopyComments != nil && *s.opts.CopyComments {
+ priorNode, err := yaml.Parse(string(priorData))
+ if err != nil {
+ return nil, err
+ }
+ // Copy comments over
+ if err := comments.CopyComments(priorNode, frameNode, true); err != nil {
+ return nil, err
+ }
+ }
+
+ for _, clearPath := range s.opts.ClearEmptyFields {
+ if len(clearPath) == 0 {
+ continue
+ }
+ filters := []yaml.Filter{}
+ if len(clearPath) > 1 {
+ // lookup the elements before the last element
+ filters = append(filters, yaml.Lookup(clearPath[:len(clearPath)-1]...))
+ }
+ filters = append(filters, yaml.FieldClearer{
+ Name: clearPath[len(clearPath)-1], // clear the last element
+ IfEmpty: true,
+ })
+ if err := frameNode.PipeE(filters...); err != nil {
+ return nil, err
+ }
+ }
+
+ return yaml.MarshalWithOptions(frameNode.Document(), &yaml.EncoderOptions{
+ SeqIndent: s.resolveSeqStyle(frame, priorData, hasPriorData),
+ })
+}
+
+func (s *defaultSanitizer) resolveSeqStyle(frame, priorData []byte, hasPriorData bool) yaml.SequenceIndentStyle {
+ // If specified, use these; can be used as "force-formatting" directives for consistency
+ if len(s.opts.ForceSeqIndentStyle) != 0 {
+ return s.opts.ForceSeqIndentStyle
+ }
+	// Otherwise, autodetect the indentation from the prior data, if it exists, or from the current frame.
+	// If the sequence style cannot be derived, the compact form will be used.
+ var deriveYAML string
+ if hasPriorData {
+ deriveYAML = string(priorData)
+ } else {
+ deriveYAML = string(frame)
+ }
+ return yaml.SequenceIndentStyle(yaml.DeriveSeqIndentStyle(deriveYAML))
+}
+
+// TODO: Maybe use the "Remarshal" property defined here to apply alphabetic order?
+// https://stackoverflow.com/questions/18668652/how-to-produce-json-with-sorted-keys-in-go
+func (s *defaultSanitizer) handleJSON(frame []byte) ([]byte, error) {
+ // If it's all whitespace, just return an empty byte array, no actual content here
+ if len(bytes.TrimSpace(frame)) == 0 {
+ return []byte{}, nil
+ }
+ var buf bytes.Buffer
+ var err error
+ if s.opts.Indentation == nil || len(*s.opts.Indentation) == 0 {
+ err = json.Compact(&buf, frame)
+ } else {
+ err = json.Indent(&buf, frame, "", *s.opts.Indentation)
+ }
+ if err != nil {
+ return nil, err
+ }
+	// Trim all surrounding whitespace, then ensure the frame ends with exactly one newline
+ return append(bytes.TrimSpace(buf.Bytes()), '\n'), nil
+}
+
+func IfSupported(ctx context.Context, s Sanitizer, ct content.ContentType, frame []byte) ([]byte, error) {
+ // If the content type isn't supported, nothing to do
+ if s == nil || !s.SupportedContentTypes().Has(ct) {
+ return frame, nil
+ }
+ return s.Sanitize(ctx, ct, frame)
+}
+
+func WithPriorData(ctx context.Context, frame []byte) context.Context {
+ return context.WithValue(ctx, priorDataKey, frame)
+}
+
+func GetPriorData(ctx context.Context) ([]byte, bool) {
+ b, ok := ctx.Value(priorDataKey).([]byte)
+ return b, ok
+}
+
+type priorDataKeyStruct struct{}
+
+var priorDataKey = priorDataKeyStruct{}
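+
+// A short sketch of the prior-data flow (assumed usage; priorYAML and newYAML
+// are illustrative byte slices):
+//
+//	ctx := WithPriorData(context.Background(), priorYAML)
+//	out, err := NewJSONYAML().Sanitize(ctx, content.ContentTypeYAML, newYAML)
+//	// Comments from priorYAML are copied onto out, and the sequence
+//	// indentation style of out is inferred from priorYAML.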
diff --git a/pkg/frame/sanitize/sanitize_test.go b/pkg/frame/sanitize/sanitize_test.go
new file mode 100644
index 00000000..0be80f68
--- /dev/null
+++ b/pkg/frame/sanitize/sanitize_test.go
@@ -0,0 +1,564 @@
+package sanitize
+
+import (
+ "context"
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/weaveworks/libgitops/pkg/content"
+)
+
+func Test_defaultSanitizer_Sanitize(t *testing.T) {
+ tests := []struct {
+ name string
+ opts []JSONYAMLOption
+ ct content.ContentType
+ prior string
+ frame string
+ want string
+ wantErr error
+ checkErr func(error) bool
+ }{
+ {
+ name: "passthrough whatever",
+ ct: content.ContentType("unknown"),
+ frame: "{randomdata:",
+ want: "{randomdata:",
+ },
+ {
+ name: "default compact",
+ ct: content.ContentTypeJSON,
+ frame: `{
+ "foo": {
+ "bar": "baz"
+ }
+ }`,
+ opts: []JSONYAMLOption{},
+ want: `{"foo":{"bar":"baz"}}
+`,
+ },
+ {
+ name: "with two spaces",
+ ct: content.ContentTypeJSON,
+ frame: ` { "foo" : "bar" }
+`,
+ opts: []JSONYAMLOption{WithSpacesIndent(2)},
+ want: `{
+ "foo": "bar"
+}
+`,
+ },
+ {
+ name: "with four spaces",
+ ct: content.ContentTypeJSON,
+ frame: ` { "foo" : {"bar": "baz"} }
+`,
+ opts: []JSONYAMLOption{WithSpacesIndent(4)},
+ want: `{
+ "foo": {
+ "bar": "baz"
+ }
+}
+`,
+ },
+ {
+ name: "with tab indent",
+ ct: content.ContentTypeJSON,
+ frame: ` { "foo" : {"bar": "baz"} }
+`,
+ opts: []JSONYAMLOption{WithTabsIndent(1)},
+ want: `{
+ "foo": {
+ "bar": "baz"
+ }
+}
+`,
+ },
+ {
+ name: "with malformed",
+ ct: content.ContentTypeJSON,
+ frame: `{"foo":"`,
+ opts: []JSONYAMLOption{WithCompactIndent()},
+ checkErr: func(err error) bool {
+ _, ok := err.(*json.SyntaxError)
+ return ok
+ },
+ },
+ {
+ name: "only whitespace",
+ ct: content.ContentTypeJSON,
+ frame: `
+
+ `,
+ want: "",
+ },
+ {
+ name: "no json",
+ ct: content.ContentTypeJSON,
+ frame: "",
+ want: "",
+ },
+ {
+ name: "weird empty formatting",
+ ct: content.ContentTypeYAML,
+ frame: `
+---
+
+
+ `,
+ want: "",
+ },
+ {
+ name: "no yaml",
+ ct: content.ContentTypeYAML,
+ frame: "",
+ want: "",
+ },
+ {
+ name: "too many frames",
+ ct: content.ContentTypeYAML,
+ frame: `aa: true
+---
+bb: false
+`,
+ wantErr: ErrTooManyFrames,
+ },
+ {
+ name: "make sure lists are not expanded",
+ ct: content.ContentTypeYAML,
+ frame: `---
+kind: List
+apiVersion: "v1"
+items:
+- name: 123
+- name: 456
+`,
+ want: `kind: List
+apiVersion: "v1"
+items:
+- name: 123
+- name: 456
+`,
+ },
+ {
+			name: "yaml format; don't be confused by the bar comment",
+ ct: content.ContentTypeYAML,
+ frame: `---
+
+kind: List
+# foo
+apiVersion: "v1"
+items:
+ # bar
+- name: 123
+
+`,
+ want: `kind: List
+# foo
+apiVersion: "v1"
+items:
+# bar
+- name: 123
+`,
+ },
+ {
+			name: "detect indentation; don't be confused by the bar comment",
+ ct: content.ContentTypeYAML,
+ frame: `---
+
+kind: List
+# foo
+apiVersion: "v1"
+items:
+# bar
+ - name: 123
+
+`,
+ want: `kind: List
+# foo
+apiVersion: "v1"
+items:
+ # bar
+ - name: 123
+`,
+ },
+ {
+ name: "force compact",
+ ct: content.ContentTypeYAML,
+ opts: []JSONYAMLOption{WithCompactSeqIndent()},
+ frame: `---
+
+kind: List
+# foo
+apiVersion: "v1"
+items:
+ # bar
+ - name: 123
+
+`,
+ want: `kind: List
+# foo
+apiVersion: "v1"
+items:
+# bar
+- name: 123
+`,
+ },
+ {
+ name: "force wide",
+ ct: content.ContentTypeYAML,
+ opts: []JSONYAMLOption{WithWideSeqIndent()},
+ frame: `---
+
+kind: List
+# foo
+apiVersion: "v1"
+items:
+# bar
+- name: 123
+
+`,
+ want: `kind: List
+# foo
+apiVersion: "v1"
+items:
+ # bar
+ - name: 123
+`,
+ },
+ {
+ name: "invalid indentation",
+ ct: content.ContentTypeYAML,
+ frame: `---
+
+kind: "foo"
+ bar: true`,
+ checkErr: func(err error) bool {
+ return err.Error() == "yaml: line 1: did not find expected key"
+ },
+ },
+ {
+ name: "infer seq style from prior; default is compact",
+ ct: content.ContentTypeYAML,
+ opts: []JSONYAMLOption{},
+ prior: `# root
+# no lists here to look at
+
+kind: List # foo
+# bla
+apiVersion: v1
+`,
+ frame: `---
+kind: List
+apiVersion: v1
+items:
+ - item1 # hello
+ - item2
+`,
+ want: `# root
+# no lists here to look at
+
+kind: List # foo
+# bla
+apiVersion: v1
+items:
+- item1 # hello
+- item2
+`,
+ },
+ {
+ name: "copy comments; infer seq style from prior",
+ ct: content.ContentTypeYAML,
+ opts: []JSONYAMLOption{},
+ prior: `# root
+# hello
+
+kind: List # foo
+# bla
+apiVersion: v1
+notexist: foo # remember me!
+
+items:
+# ignoreme
+ - item1 # hello
+ # bla
+ - item2 # hi
+ # after`,
+ frame: `---
+kind: List
+apiVersion: v1
+fruits:
+- fruit1
+items:
+- item1
+- item2
+- item3
+`,
+ want: `# root
+# hello
+# Comments lost during file manipulation:
+# Field "notexist": "remember me!"
+
+kind: List # foo
+# bla
+apiVersion: v1
+fruits:
+ - fruit1
+items:
+ # ignoreme
+ - item1 # hello
+ # bla
+ - item2 # hi
+ # after
+
+ - item3
+`,
+ },
+ {
+ name: "copy comments; mappingnode keys are now alphabetically sorted",
+ ct: content.ContentTypeYAML,
+ opts: []JSONYAMLOption{},
+ prior: `# root
+# hello
+
+items:
+# ignoreme
+ - item1 # hello
+ # bla
+ - item2 # hi
+ # after
+kind: List # foo
+# bla
+apiVersion: v1
+notexist: foo # remember me!
+
+`,
+ frame: `---
+apiVersion: v1
+fruits:
+- fruit1
+
+items:
+- item1
+- item2
+- item3
+
+kind: List
+
+`,
+ want: `# root
+# hello
+# Comments lost during file manipulation:
+# Field "notexist": "remember me!"
+
+# bla
+apiVersion: v1
+fruits:
+ - fruit1
+items:
+ # ignoreme
+ - item1 # hello
+ # bla
+ - item2 # hi
+ # after
+
+ - item3
+kind: List # foo
+`,
+ },
+ {
+ name: "don't copy comments; infer from prior",
+ ct: content.ContentTypeYAML,
+ opts: []JSONYAMLOption{WithNoCommentsCopy()},
+ prior: `# root
+# hello
+
+kind: List # foo
+# bla
+apiVersion: v1
+notexist: foo # remember me!
+
+items:
+# ignoreme
+- item1 # hello
+ # bla
+ - item2 # trying to trick the system; but it should make style choice based on item1
+ # after`,
+ frame: `---
+kind: List
+apiVersion: v1
+fruits:
+- fruit1 # new
+items: # new
+- item1
+- item2
+# new
+- item3
+`,
+ want: `kind: List
+apiVersion: v1
+fruits:
+- fruit1 # new
+items: # new
+- item1
+- item2
+# new
+- item3
+`,
+ },
+ {
+ name: "invalid prior",
+ ct: content.ContentTypeYAML,
+ prior: `# root
+# hello
+
+kind: List # foo
+# bla
+apiVersion: v1
+notexist: foo # remember me!
+
+items:
+# ignoreme
+ - item1 # hello
+ # bla
+- item2 # trying to trick the system; but it should make style choice based on item1
+ # after`,
+ frame: `---
+kind: List
+apiVersion: v1
+fruits:
+- fruit1 # new
+items: # new
+- item1
+- item2
+# new
+- item3
+`,
+ checkErr: func(err error) bool {
+ return err.Error() == "yaml: line 3: did not find expected key"
+ },
+ },
+ {
+ name: "invalid copy comments; change from scalar to mapping node",
+ ct: content.ContentTypeYAML,
+ prior: `# root
+foo: "bar" # baz`,
+ frame: `
+foo:
+ name: "bar"
+`,
+ checkErr: func(err error) bool {
+ // from sigs.k8s.io/kustomize/kyaml/yaml/fns.go:728
+ return err.Error() == `wrong Node Kind for expected: ScalarNode was MappingNode: value: {name: "bar"}`
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ ctx := context.Background()
+ s := NewJSONYAML(tt.opts...)
+ if len(tt.prior) != 0 {
+ ctx = WithPriorData(ctx, []byte(tt.prior))
+ }
+ got, err := s.Sanitize(ctx, tt.ct, []byte(tt.frame))
+ assert.Equal(t, tt.want, string(got))
+ if tt.checkErr != nil {
+ assert.True(t, tt.checkErr(err))
+ } else {
+ assert.ErrorIs(t, err, tt.wantErr)
+ }
+ })
+ }
+}
+
+func TestIfSupported(t *testing.T) {
+ ctx := context.Background()
+ tests := []struct {
+ name string
+ s Sanitizer
+ ct content.ContentType
+ frame string
+ want string
+ wantErr bool
+ }{
+ {
+ name: "nil sanitizer",
+ frame: "foo",
+ want: "foo",
+ },
+ {
+ name: "unknown content type",
+ s: NewJSONYAML(),
+ ct: content.ContentType("unknown"),
+ frame: "foo",
+ want: "foo",
+ },
+ {
+ name: "sanitize",
+ s: NewJSONYAML(WithCompactIndent()),
+ ct: content.ContentTypeJSON,
+ frame: ` { "foo" : true } `,
+ want: `{"foo":true}
+`,
+ },
+ { // TODO: Test all possible corner cases with this, and move to the test above
+ name: "remove empty .metadata.creationTimestamp and .status",
+ s: NewJSONYAML(),
+ ct: content.ContentTypeYAML,
+ frame: `---
+apiVersion: v1
+kind: Pod
+metadata:
+ name: foo
+ creationTimestamp: null
+spec:
+ containers: null
+status: {}
+`,
+ want: `apiVersion: v1
+kind: Pod
+metadata:
+ name: foo
+spec:
+ containers: null
+`,
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ got, _ := IfSupported(ctx, tt.s, tt.ct, []byte(tt.frame))
+ assert.Equal(t, tt.want, string(got))
+ })
+ }
+}
+
+/*
+func ExampleClear() {
+ obj, err := yaml.Parse(`
+kind: Deployment
+metadata: null
+spec:
+ template: {}
+`)
+ if err != nil {
+ log.Fatal(err)
+ }
+ node, err := obj.Pipe(yaml.FieldClearer{Name: "metadata", IfEmpty: true})
+ if err != nil {
+ log.Fatal(err)
+ }
+ fmt.Println(node.String())
+ fmt.Println(obj.String())
+ // Output:
+ // name: app
+ // annotations:
+ // a.b.c: d.e.f
+ // g: h
+ //
+ // kind: Deployment
+ // spec:
+ // template: {}
+ //
+}
+*/
diff --git a/pkg/frame/single.go b/pkg/frame/single.go
new file mode 100644
index 00000000..27470721
--- /dev/null
+++ b/pkg/frame/single.go
@@ -0,0 +1,48 @@
+package frame
+
+import (
+ "context"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+)
+
+func newSingleReader(r content.Reader, ct content.ContentType, o *singleReaderOptions) Reader {
+ // Make sure not more than this set of bytes can be read
+ r, _ = content.WrapLimited(r, o.MaxFrameSize)
+ return &singleReader{
+ // TODO: Apply options?
+ MetadataContainer: r.ContentMetadata().Clone().ToContainer(),
+ ContentTyped: ct,
+ r: r,
+ }
+}
+
+// singleReader implements reading a single frame (up to a certain limit) from an io.ReadCloser.
+// It MUST be wrapped in a higher-level composite Reader like the highlevelReader to satisfy the
+// Reader interface correctly.
+type singleReader struct {
+ content.MetadataContainer
+ content.ContentTyped
+ r content.Reader
+ hasBeenRead bool
+}
+
+// Read the whole frame from the underlying io.Reader, up to a given limit
+func (r *singleReader) ReadFrame(ctx context.Context) ([]byte, error) {
+ if r.hasBeenRead {
+		// The frame has already been read once; report io.EOF for any subsequent
+		// reads. (The higher-level Reader doesn't enforce this itself, as it is
+		// constructed with MaxFrameCount == Infinite; see NewSingleReader.)
+ return nil, io.EOF // TODO: What about the third time?
+ }
+	// Mark that the frame has now been read (regardless of the result)
+ r.hasBeenRead = true
+ // Read the whole frame from the underlying io.Reader, up to a given amount
+ frame, err := io.ReadAll(r.r.WithContext(ctx))
+ if err != nil {
+ return nil, err
+ }
+ return frame, nil
+}
+
+func (r *singleReader) Close(ctx context.Context) error { return r.r.WithContext(ctx).Close() }
diff --git a/pkg/frame/utils.go b/pkg/frame/utils.go
new file mode 100644
index 00000000..ebf676c4
--- /dev/null
+++ b/pkg/frame/utils.go
@@ -0,0 +1,78 @@
+package frame
+
+import (
+ "context"
+ "errors"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/tracing"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// List is a list of frames (byte arrays), used for convenience functions.
+type List [][]byte
+
+// ListFromReader is a convenience method that constructs a List by reading
+// from the given Reader r until io.EOF. If an error other than io.EOF is returned,
+// reading is aborted immediately and the error is returned.
+func ListFromReader(ctx context.Context, r Reader) (List, error) {
+ var f List
+ for {
+ // Read until we get io.EOF or an error
+ frame, err := r.ReadFrame(ctx)
+ if errors.Is(err, io.EOF) {
+ break
+ } else if err != nil {
+ return nil, err
+ }
+		// Append the frame to the returned list
+ f = append(f, frame)
+ }
+ return f, nil
+}
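+
+// An illustrative sketch (assumption: FromYAMLString splits on the "---"
+// document separator, as exercised in the tests):
+//
+//	frames, err := ListFromReader(ctx, FromYAMLString("a: 1\n---\nb: 2\n"))
+//	// frames now holds one []byte per YAML document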
+
+func ListFromBytes(list ...[]byte) List { return list }
+
+// WriteTo is a convenience method that writes a set of frames to a Writer.
+// If an error occurs, writing stops and the error is returned.
+func (f List) WriteTo(ctx context.Context, fw Writer) error {
+	// Loop over all frames in the list, and write them individually to the Writer
+ for _, frame := range f {
+ if err := fw.WriteFrame(ctx, frame); err != nil {
+ return err
+ }
+ }
+ return nil
+}
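+
+// A hedged round-trip sketch pairing ListFromBytes with WriteTo; the YAML writer
+// is assumed to prepend the "---" document separator before each frame:
+//
+//	w := NewYAMLWriter(content.NewWriter(&buf))
+//	err := ListFromBytes([]byte("a: 1\n"), []byte("b: 2\n")).WriteTo(ctx, w)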
+
+// ToIoWriteCloser transforms a Writer into an io.WriteCloser by binding the given
+// context.Context to it. For Write: if err != nil, then n == 0; if err == nil, then n == len(frame).
+func ToIoWriteCloser(ctx context.Context, w Writer) io.WriteCloser {
+ return &ioWriterHelper{ctx, w}
+}
+
+type ioWriterHelper struct {
+ ctx context.Context
+ parent Writer
+}
+
+func (w *ioWriterHelper) Write(frame []byte) (n int, err error) {
+ if err := w.parent.WriteFrame(w.ctx, frame); err != nil {
+ return 0, err
+ }
+ return len(frame), nil
+}
+func (w *ioWriterHelper) Close() error {
+ return w.parent.Close(w.ctx)
+}
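+
+// This adapter is what lets io.Writer-based APIs (for example fmt.Fprint, or the
+// encoders in pkg/serializer) target a frame.Writer; an illustrative sketch:
+//
+//	iow := ToIoWriteCloser(ctx, w)
+//	_, err := fmt.Fprint(iow, "foo: bar\n") // forwarded as a single WriteFrame call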
+
+func closeWithTrace(ctx context.Context, c Closer, obj interface{}) error {
+ return tracing.FromContext(ctx, obj).TraceFunc(ctx, "Close", func(ctx context.Context, _ trace.Span) error {
+ return c.Close(ctx)
+ }).Register()
+}
+
+// nopCloser returns nil when Close(ctx) is called
+type nopCloser struct{}
+
+func (*nopCloser) Close(context.Context) error { return nil }
diff --git a/pkg/frame/utils_test.go b/pkg/frame/utils_test.go
new file mode 100644
index 00000000..9eb10cd5
--- /dev/null
+++ b/pkg/frame/utils_test.go
@@ -0,0 +1,119 @@
+package frame
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "io/fs"
+ "io/ioutil"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/tracing"
+ "github.com/weaveworks/libgitops/pkg/util/compositeio"
+ "github.com/weaveworks/libgitops/pkg/util/limitedio"
+)
+
+type rawCloserExposer interface {
+ RawCloser() io.Closer
+}
+
+func TestFromConstructors(t *testing.T) {
+ yamlPath := filepath.Join(t.TempDir(), "foo.yaml")
+ str := "foo: bar\n"
+ byteContent := []byte(str)
+ err := ioutil.WriteFile(yamlPath, byteContent, 0644)
+ require.Nil(t, err)
+
+ ctx := tracing.BackgroundTracingContext()
+ // FromYAMLFile -- found
+ got, err := FromYAMLFile(yamlPath).ReadFrame(ctx)
+ assert.Nil(t, err)
+ assert.Equal(t, str, string(got))
+ // content.FromFile -- already closed
+ f := content.FromFile(yamlPath)
+ (f.(rawCloserExposer)).RawCloser().Close() // deliberately close the file before giving it to the reader
+ got, err = NewYAMLReader(f).ReadFrame(ctx)
+ assert.ErrorIs(t, err, fs.ErrClosed)
+ assert.Empty(t, got)
+ // FromYAMLFile -- not found
+ got, err = FromYAMLFile(filepath.Join(t.TempDir(), "notexist.yaml")).ReadFrame(ctx)
+ assert.ErrorIs(t, err, fs.ErrNotExist)
+ assert.Empty(t, got)
+ // FromYAMLBytes
+ got, err = FromYAMLBytes(byteContent).ReadFrame(ctx)
+ assert.Nil(t, err)
+ assert.Equal(t, byteContent, got)
+ // FromYAMLString
+ got, err = FromYAMLString(str).ReadFrame(ctx)
+ assert.Nil(t, err)
+ assert.Equal(t, str, string(got))
+ assert.Nil(t, tracing.ForceFlushGlobal(ctx, 0))
+}
+
+func TestToIoWriteCloser(t *testing.T) {
+ var buf bytes.Buffer
+ closeRec := &recordingCloser{}
+ cw := content.NewWriter(compositeio.WriteCloser(&buf, closeRec))
+ w := NewYAMLWriter(cw, SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)})
+ ctx := tracing.BackgroundTracingContext()
+ iow := ToIoWriteCloser(ctx, w)
+
+ byteContent := []byte(testYAML)
+ n, err := iow.Write(byteContent)
+ assert.Len(t, byteContent, n)
+ assert.Nil(t, err)
+
+	// Check that the Close call is forwarded
+ assert.Nil(t, iow.Close())
+ assert.Equal(t, 1, closeRec.count)
+
+ // Try writing again
+ overflowContent := []byte(testYAML + testYAML)
+ n, err = iow.Write(overflowContent)
+ assert.Equal(t, 0, n)
+ assert.ErrorIs(t, err, &limitedio.ReadSizeOverflowError{})
+	// Assert that the writer has been closed only once
+ assert.Equal(t, 1, closeRec.count)
+ assert.Equal(t, buf.String(), yamlSep+string(byteContent))
+
+ assert.Nil(t, tracing.ForceFlushGlobal(context.Background(), 0))
+}
+
+func TestListFromReader(t *testing.T) {
+ ctx := tracing.BackgroundTracingContext()
+ // Happy case
+ fr, err := ListFromReader(ctx, FromYAMLString(messyYAML))
+ assert.Equal(t, List{[]byte(testYAML), []byte(testYAML)}, fr)
+ assert.Nil(t, err)
+
+ // Non-happy case
+ r := NewJSONReader(content.FromString(testJSON2), SingleOptions{MaxFrameSize: limitedio.Limit(testJSONlen - 1)})
+ fr, err = ListFromReader(ctx, r)
+ assert.Len(t, fr, 0)
+ assert.ErrorIs(t, err, &limitedio.ReadSizeOverflowError{})
+ assert.Nil(t, tracing.ForceFlushGlobal(ctx, 0))
+}
+
+func TestList_WriteTo(t *testing.T) {
+ var buf bytes.Buffer
+ // TODO: Automatically get the name of the writer passed in, to avoid having to name
+ // everything. i.e. content.NewWriterName(string, io.Writer)
+ cw := content.NewWriter(&buf)
+ w := NewYAMLWriter(cw)
+ ctx := context.Background()
+ // Happy case
+ err := ListFromBytes([]byte(testYAML), []byte(testYAML)).WriteTo(ctx, w)
+ assert.Equal(t, buf.String(), yamlSep+testYAML+yamlSep+testYAML)
+ assert.Nil(t, err)
+
+ // Non-happy case
+ buf.Reset()
+ w = NewJSONWriter(cw, SingleOptions{MaxFrameSize: limitedio.Limit(testJSONlen)})
+ err = ListFromBytes([]byte(testJSON), []byte(testJSON2)).WriteTo(ctx, w)
+ assert.Equal(t, buf.String(), testJSON)
+ assert.ErrorIs(t, err, &limitedio.ReadSizeOverflowError{})
+}
diff --git a/pkg/frame/writer.go b/pkg/frame/writer.go
new file mode 100644
index 00000000..5a6f93fc
--- /dev/null
+++ b/pkg/frame/writer.go
@@ -0,0 +1,76 @@
+package frame
+
+import (
+ "context"
+ "sync"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/frame/sanitize"
+ "github.com/weaveworks/libgitops/pkg/tracing"
+ "github.com/weaveworks/libgitops/pkg/util/limitedio"
+ "go.opentelemetry.io/otel/trace"
+)
+
+func newHighlevelWriter(w Writer, opts *writerOptions) Writer {
+ return &highlevelWriter{
+ writer: w,
+ writerMu: &sync.Mutex{},
+ opts: opts,
+ }
+}
+
+type highlevelWriter struct {
+ writer Writer
+ writerMu *sync.Mutex
+ opts *writerOptions
+	// frameCount counts the number of frames successfully written
+ frameCount int64
+}
+
+func (w *highlevelWriter) WriteFrame(ctx context.Context, frame []byte) error {
+ w.writerMu.Lock()
+ defer w.writerMu.Unlock()
+
+ return tracing.FromContext(ctx, w).TraceFunc(ctx, "WriteFrame", func(ctx context.Context, span trace.Span) error {
+ // Refuse to write too large frames
+ if w.opts.MaxFrameSize.IsLessThan(int64(len(frame))) {
+ return limitedio.ErrReadSizeOverflow(w.opts.MaxFrameSize)
+ }
+ // Refuse to write more than the maximum amount of frames
+ if w.opts.MaxFrameCount.IsLessThanOrEqual(w.frameCount) {
+ return ErrFrameCountOverflow(w.opts.MaxFrameCount)
+ }
+
+ // Sanitize the frame
+ // TODO: Maybe create a composite writer that actually reads the given frame first, to
+		// fully sanitize/validate it, and only then write the frames out using the writer?
+ frame, err := sanitize.IfSupported(ctx, w.opts.Sanitizer, w.ContentType(), frame)
+ if err != nil {
+ return err
+ }
+
+ // Register the amount of (sanitized) bytes and call the underlying Writer
+ span.SetAttributes(content.SpanAttrByteContent(frame)...)
+
+ // Catch empty frames
+ if len(frame) == 0 {
+ return nil
+ }
+
+ err = w.writer.WriteFrame(ctx, frame)
+
+ // Increase the frame counter, if the write was successful
+ if err == nil {
+			w.frameCount++
+ }
+ return err
+ }).Register()
+}
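+
+// Hedged behavior sketch of the checks above, assuming a writer constructed with
+// MaxFrameSize == 4 and MaxFrameCount == 1 (field names as in writerOptions):
+//
+//	_ = w.WriteFrame(ctx, []byte("ok"))          // within both limits: written
+//	err1 := w.WriteFrame(ctx, []byte("ok"))      // frame count overflow
+//	err2 := w.WriteFrame(ctx, []byte("toolong")) // frame size overflow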
+
+func (w *highlevelWriter) ContentType() content.ContentType { return w.writer.ContentType() }
+func (w *highlevelWriter) Close(ctx context.Context) error {
+ return closeWithTrace(ctx, w.writer, w)
+}
+
+// Just forward the metadata, don't do anything specific with it
+func (w *highlevelWriter) ContentMetadata() content.Metadata { return w.writer.ContentMetadata() }
diff --git a/pkg/frame/writer_delegate.go b/pkg/frame/writer_delegate.go
new file mode 100644
index 00000000..fa968e97
--- /dev/null
+++ b/pkg/frame/writer_delegate.go
@@ -0,0 +1,58 @@
+package frame
+
+import (
+ "context"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+)
+
+func newDelegatingWriter(ct content.ContentType, w content.Writer) Writer {
+ return &delegatingWriter{
+ // TODO: Register options?
+ MetadataContainer: w.ContentMetadata().Clone().ToContainer(),
+ ContentTyped: ct,
+ w: w,
+ }
+}
+
+// delegatingWriter is an implementation of the Writer interface
+type delegatingWriter struct {
+ content.MetadataContainer
+ content.ContentTyped
+ w content.Writer
+}
+
+func (w *delegatingWriter) WriteFrame(ctx context.Context, frame []byte) error {
+ // Write the frame to the underlying writer
+ n, err := w.w.WithContext(ctx).Write(frame)
+ // Guard against short writes
+ return catchShortWrite(n, err, frame)
+}
+
+func (w *delegatingWriter) Close(ctx context.Context) error { return w.w.WithContext(ctx).Close() }
+
+func newErrWriter(ct content.ContentType, err error, meta content.Metadata) Writer {
+ return &errWriter{
+ meta.Clone().ToContainer(),
+ ct,
+ &nopCloser{},
+ err,
+ }
+}
+
+type errWriter struct {
+ content.MetadataContainer
+ content.ContentTyped
+ Closer
+ err error
+}
+
+func (w *errWriter) WriteFrame(context.Context, []byte) error { return w.err }
+
+func catchShortWrite(n int, err error, frame []byte) error {
+ if n < len(frame) && err == nil {
+ err = io.ErrShortWrite
+ }
+ return err
+}
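+
+// For reference, the short-write guard in a minimal sketch: an underlying Write
+// that reports fewer bytes than len(frame) without an error surfaces as
+// io.ErrShortWrite:
+//
+//	catchShortWrite(3, nil, []byte("hello")) // == io.ErrShortWrite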
diff --git a/pkg/frame/writer_factory.go b/pkg/frame/writer_factory.go
new file mode 100644
index 00000000..1191648c
--- /dev/null
+++ b/pkg/frame/writer_factory.go
@@ -0,0 +1,50 @@
+package frame
+
+import (
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "k8s.io/apimachinery/pkg/runtime/serializer/json"
+)
+
+func (defaultFactory) NewWriter(ct content.ContentType, w content.Writer, opts ...WriterOption) Writer {
+ o := defaultWriterOptions().applyOptions(opts)
+
+ var lowlevel Writer
+ switch ct {
+ case content.ContentTypeYAML:
+ lowlevel = newDelegatingWriter(content.ContentTypeYAML, w.Wrap(func(underlying io.WriteCloser) io.Writer {
+ // This writer always prepends a "---" before each frame
+ return json.YAMLFramer.NewFrameWriter(underlying)
+ }))
+ case content.ContentTypeJSON:
+ // JSON documents are self-framing; hence, no need to wrap the writer in any way
+ lowlevel = newDelegatingWriter(content.ContentTypeJSON, w)
+ default:
+ return newErrWriter(ct, content.ErrUnsupportedContentType(ct), w.ContentMetadata())
+ }
+ return newHighlevelWriter(lowlevel, o)
+}
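+
+// An illustrative construction sketch (constructor names as used in this
+// package's tests); the YAML writer frames each document with a "---" separator:
+//
+//	w := DefaultFactory().NewWriter(content.ContentTypeYAML, content.NewWriter(&buf))
+//	err := w.WriteFrame(ctx, []byte("foo: bar\n")) // buf: "---\nfoo: bar\n"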
+
+func (defaultFactory) NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer {
+ o := defaultSingleWriterOptions().applyOptions(opts)
+
+ return newHighlevelWriter(newDelegatingWriter(ct, w), &writerOptions{
+ Options: Options{
+ SingleOptions: o.SingleOptions,
+ MaxFrameCount: 1,
+ },
+ })
+}
+
+func (f defaultFactory) NewRecognizingWriter(w content.Writer, opts ...RecognizingWriterOption) Writer {
+ o := defaultRecognizingWriterOptions().applyOptions(opts)
+
+ // Recognize the content type using the given recognizer
+ r, ct, err := content.NewRecognizingWriter(w, o.Recognizer)
+ if err != nil {
+ return newErrWriter("", err, r.ContentMetadata())
+ }
+ // Re-use the logic of the "main" Writer constructor; validate ct there
+ return f.NewWriter(ct, w, o)
+}
diff --git a/pkg/frame/writer_test.go b/pkg/frame/writer_test.go
new file mode 100644
index 00000000..80281407
--- /dev/null
+++ b/pkg/frame/writer_test.go
@@ -0,0 +1,34 @@
+package frame
+
+import (
+ "bytes"
+ "context"
+ "io"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/weaveworks/libgitops/pkg/content"
+)
+
+func TestNewWriter_Unrecognized(t *testing.T) {
+ fr := DefaultFactory().NewWriter(content.ContentType("doesnotexist"), content.NewWriter(io.Discard))
+ ctx := context.Background()
+ err := fr.WriteFrame(ctx, make([]byte, 1))
+ assert.ErrorIs(t, err, &content.UnsupportedContentTypeError{})
+}
+
+func TestWriterShortBuffer(t *testing.T) {
+ var buf bytes.Buffer
+ w := &halfWriter{&buf}
+ ctx := context.Background()
+ err := NewYAMLWriter(content.NewWriter(w)).WriteFrame(ctx, []byte("foo: bar"))
+ assert.Equal(t, io.ErrShortWrite, err)
+}
+
+type halfWriter struct {
+ w io.Writer
+}
+
+func (w *halfWriter) Write(p []byte) (int, error) {
+ return w.w.Write(p[0 : (len(p)+1)/2])
+}
diff --git a/pkg/gitdir/gitdir.go b/pkg/gitdir/gitdir.go
deleted file mode 100644
index a9eb0b70..00000000
--- a/pkg/gitdir/gitdir.go
+++ /dev/null
@@ -1,474 +0,0 @@
-package gitdir
-
-import (
- "context"
- "errors"
- "fmt"
- "io/ioutil"
- "os"
- "sync"
- "time"
-
- "github.com/fluxcd/go-git-providers/gitprovider"
- git "github.com/go-git/go-git/v5"
- "github.com/go-git/go-git/v5/plumbing"
- "github.com/go-git/go-git/v5/plumbing/object"
- log "github.com/sirupsen/logrus"
- "k8s.io/apimachinery/pkg/util/wait"
-)
-
-var (
- // ErrNotStarted happens if you try to operate on the gitDirectory before you have started
- // it with StartCheckoutLoop.
- ErrNotStarted = errors.New("the gitDirectory hasn't been started (and hence, cloned) yet")
- // ErrCannotWriteToReadOnly happens if you try to do a write operation for a non-authenticated Git repo.
- ErrCannotWriteToReadOnly = errors.New("the gitDirectory is read-only, cannot write")
-)
-
-const (
- defaultBranch = "master"
- defaultRemote = "origin"
- defaultInterval = 30 * time.Second
- defaultTimeout = 1 * time.Minute
-)
-
-// GitDirectoryOptions provides options for the gitDirectory.
-// TODO: Refactor this into the controller-runtime Options factory pattern.
-type GitDirectoryOptions struct {
- // Options
- Branch string // default "master"
- Interval time.Duration // default 30s
- Timeout time.Duration // default 1m
- // TODO: Support folder prefixes
-
- // Authentication
- AuthMethod AuthMethod
-}
-
-func (o *GitDirectoryOptions) Default() {
- if o.Branch == "" {
- o.Branch = defaultBranch
- }
- if o.Interval == 0 {
- o.Interval = defaultInterval
- }
- if o.Timeout == 0 {
- o.Timeout = defaultTimeout
- }
-}
-
-// GitDirectory is an abstraction layer for a temporary Git clone. It pulls
-// and checks out new changes periodically in the background. It also allows
-// high-level access to write operations, like creating a new branch, committing,
-// and pushing.
-type GitDirectory interface {
- // Dir returns the backing temporary directory of the git clone.
- Dir() string
- // MainBranch returns the configured main branch.
- MainBranch() string
- // RepositoryRef returns the repository reference.
- RepositoryRef() gitprovider.RepositoryRef
-
- // StartCheckoutLoop clones the repo synchronously, and then starts the checkout loop non-blocking.
- // If the checkout loop has been started already, this is a no-op.
- StartCheckoutLoop() error
- // Suspend waits for any pending transactions or operations, and then locks the internal mutex so that
- // no other operations can start. This means the periodic background checkout loop will momentarily stop.
- Suspend()
- // Resume unlocks the mutex locked in Suspend(), so that other Git operations, like the background checkout
- // loop can resume its operation.
- Resume()
-
- // Pull performs a pull & checkout to the latest revision.
- // ErrNotStarted is returned if the repo hasn't been cloned yet.
- Pull(ctx context.Context) error
-
- // CheckoutNewBranch creates a new branch and checks out to it.
- // ErrNotStarted is returned if the repo hasn't been cloned yet.
- CheckoutNewBranch(branchName string) error
- // CheckoutMainBranch goes back to the main branch.
- // ErrNotStarted is returned if the repo hasn't been cloned yet.
- CheckoutMainBranch() error
-
- // Commit creates a commit of all changes in the current worktree with the given parameters.
- // It also automatically pushes the branch after the commit.
- // ErrNotStarted is returned if the repo hasn't been cloned yet.
- // ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided.
- Commit(ctx context.Context, authorName, authorEmail, msg string) error
- // CommitChannel is a channel to where new observed Git SHAs are written.
- CommitChannel() chan string
-
- // Cleanup terminates any pending operations, and removes the temporary directory.
- Cleanup() error
-}
-
-// Create a new GitDirectory implementation. In order to start using this, run StartCheckoutLoop().
-func NewGitDirectory(repoRef gitprovider.RepositoryRef, opts GitDirectoryOptions) (GitDirectory, error) {
- log.Info("Initializing the Git repo...")
-
- // Default the options
- opts.Default()
-
- // Create a temporary directory for the clone
- tmpDir, err := ioutil.TempDir("", "libgitops")
- if err != nil {
- return nil, err
- }
- log.Debugf("Created temporary directory for the git clone at %q", tmpDir)
-
- d := &gitDirectory{
- repoRef: repoRef,
- GitDirectoryOptions: opts,
- cloneDir: tmpDir,
- // TODO: This needs to be large, otherwise it can start blocking unnecessarily if nobody reads it
- commitChan: make(chan string, 1024),
- lock: &sync.Mutex{},
- }
- // Set up the parent context for this class. d.cancel() is called only at Cleanup()
- d.ctx, d.cancel = context.WithCancel(context.Background())
-
- log.Trace("URL endpoint parsed and authentication method chosen")
-
- if d.canWrite() {
- log.Infof("Running in read-write mode, will commit back current status to the repo")
- } else {
- log.Infof("Running in read-only mode, won't write status back to the repo")
- }
-
- return d, nil
-}
-
-// gitDirectory is an implementation which keeps a directory
-type gitDirectory struct {
- // user-specified options
- repoRef gitprovider.RepositoryRef
- GitDirectoryOptions
-
- // the temporary directory used for the clone
- cloneDir string
-
- // go-git objects. wt is the worktree of the repo, persistent during the lifetime of repo.
- repo *git.Repository
- wt *git.Worktree
-
- // latest known commit to the system
- lastCommit string
- // events channel from new commits
- commitChan chan string
-
- // the context and its cancel function for the lifetime of this struct (until Cleanup())
- ctx context.Context
- cancel context.CancelFunc
- // the lock for git operations (so pushing and pulling aren't done simultaneously)
- lock *sync.Mutex
-}
-
-func (d *gitDirectory) Dir() string {
- return d.cloneDir
-}
-
-func (d *gitDirectory) MainBranch() string {
- return d.Branch
-}
-
-func (d *gitDirectory) RepositoryRef() gitprovider.RepositoryRef {
- return d.repoRef
-}
-
-// StartCheckoutLoop clones the repo synchronously, and then starts the checkout loop non-blocking.
-// If the checkout loop has been started already, this is a no-op.
-func (d *gitDirectory) StartCheckoutLoop() error {
- if d.wt != nil {
- return nil // already initialized
- }
- // First, clone the repo
- if err := d.clone(); err != nil {
- return err
- }
- go d.checkoutLoop()
- return nil
-}
-
-func (d *gitDirectory) Suspend() {
- d.lock.Lock()
-}
-
-func (d *gitDirectory) Resume() {
- d.lock.Unlock()
-}
-
-func (d *gitDirectory) CommitChannel() chan string {
- return d.commitChan
-}
-
-func (d *gitDirectory) checkoutLoop() {
- log.Info("Starting the checkout loop...")
-
- wait.NonSlidingUntilWithContext(d.ctx, func(_ context.Context) {
-
- log.Trace("checkoutLoop: Will perform pull operation")
- // Perform a pull & checkout of the new revision
- if err := d.Pull(d.ctx); err != nil {
- log.Errorf("checkoutLoop: git pull failed with error: %v", err)
- return
- }
-
- }, d.Interval)
- log.Info("Exiting the checkout loop...")
-}
-
-func (d *gitDirectory) cloneURL() string {
- return d.repoRef.GetCloneURL(d.AuthMethod.TransportType())
-}
-
-func (d *gitDirectory) canWrite() bool {
- return d.AuthMethod != nil
-}
-
-// verifyRead makes sure it's ok to start a read-something-from-git process
-func (d *gitDirectory) verifyRead() error {
- // Safeguard against not starting yet
- if d.wt == nil {
- return fmt.Errorf("cannot pull: %w", ErrNotStarted)
- }
- return nil
-}
-
-// verifyWrite makes sure it's ok to start a write-something-to-git process
-func (d *gitDirectory) verifyWrite() error {
- // We need all read privileges first
- if err := d.verifyRead(); err != nil {
- return err
- }
- // Make sure we don't write to a possibly read-only repo
- if !d.canWrite() {
- return ErrCannotWriteToReadOnly
- }
- return nil
-}
-
-func (d *gitDirectory) clone() error {
- // Lock the mutex now that we're starting, and unlock it when exiting
- d.lock.Lock()
- defer d.lock.Unlock()
-
- log.Infof("Starting to clone the repository %s with timeout %s", d.repoRef, d.Timeout)
- // Do a clone operation to the temporary directory, with a timeout
- err := d.contextWithTimeout(d.ctx, func(ctx context.Context) error {
- var err error
- d.repo, err = git.PlainCloneContext(ctx, d.Dir(), false, &git.CloneOptions{
- URL: d.cloneURL(),
- Auth: d.AuthMethod,
- RemoteName: defaultRemote,
- ReferenceName: plumbing.NewBranchReferenceName(d.Branch),
- SingleBranch: true,
- NoCheckout: false,
- //Depth: 1, // ref: https://github.com/src-d/go-git/issues/1143
- RecurseSubmodules: 0,
- Progress: nil,
- Tags: git.NoTags,
- })
- return err
- })
- // Handle errors
- switch err {
- case nil:
- // no-op, just continue.
- case context.DeadlineExceeded:
- return fmt.Errorf("git clone operation took longer than deadline %s", d.Timeout)
- case context.Canceled:
- log.Tracef("context was cancelled")
- return nil // if Cleanup() was called, just exit the goroutine
- default:
- return fmt.Errorf("git clone error: %v", err)
- }
-
- // Populate the worktree pointer
- d.wt, err = d.repo.Worktree()
- if err != nil {
- return fmt.Errorf("git get worktree error: %v", err)
- }
-
- // Get the latest HEAD commit and report it to the user
- ref, err := d.repo.Head()
- if err != nil {
- return err
- }
-
- d.observeCommit(ref.Hash())
- return nil
-}
-
-func (d *gitDirectory) Pull(ctx context.Context) error {
- // Lock the mutex now that we're starting, and unlock it when exiting
- d.lock.Lock()
- defer d.lock.Unlock()
-
- // Make sure it's okay to read
- if err := d.verifyRead(); err != nil {
- return err
- }
-
- // Perform the git pull operation using the timeout
- err := d.contextWithTimeout(ctx, func(innerCtx context.Context) error {
- log.Trace("checkoutLoop: Starting pull operation")
- return d.wt.PullContext(innerCtx, &git.PullOptions{
- Auth: d.AuthMethod,
- SingleBranch: true,
- })
- })
- // Handle errors
- switch err {
- case nil, git.NoErrAlreadyUpToDate:
- // no-op, just continue. Allow the git.NoErrAlreadyUpToDate error
- case context.DeadlineExceeded:
- return fmt.Errorf("git pull operation took longer than deadline %s", d.Timeout)
- case context.Canceled:
- log.Tracef("context was cancelled")
- return nil // if Cleanup() was called, just exit the goroutine
- default:
- return fmt.Errorf("failed to pull: %v", err)
- }
-
- log.Trace("checkoutLoop: Pulled successfully")
-
- // get current head
- ref, err := d.repo.Head()
- if err != nil {
- return err
- }
-
- // check if we changed commits
- if d.lastCommit != ref.Hash().String() {
- // Notify upstream that we now have a new commit, and allow writing again
- d.observeCommit(ref.Hash())
- }
-
- return nil
-}
-
-func (d *gitDirectory) CheckoutNewBranch(branchName string) error {
- // Make sure it's okay to write
- if err := d.verifyWrite(); err != nil {
- return err
- }
-
- return d.wt.Checkout(&git.CheckoutOptions{
- Branch: plumbing.NewBranchReferenceName(branchName),
- Create: true,
- })
-}
-
-func (d *gitDirectory) CheckoutMainBranch() error {
- // Make sure it's okay to write
- if err := d.verifyWrite(); err != nil {
- return err
- }
-
- // Best-effort clean
- _ = d.wt.Clean(&git.CleanOptions{
- Dir: true,
- })
- // Force-checkout the main branch
- return d.wt.Checkout(&git.CheckoutOptions{
- Branch: plumbing.NewBranchReferenceName(d.Branch),
- Force: true,
- })
-}
-
-// observeCommit sets the lastCommit variable so that we know the latest state
-func (d *gitDirectory) observeCommit(commit plumbing.Hash) {
- d.lastCommit = commit.String()
- d.commitChan <- commit.String()
- log.Infof("New commit observed on branch %q: %s", d.Branch, commit)
-}
-
-// Commit creates a commit of all changes in the current worktree with the given parameters.
-// It also automatically pushes the branch after the commit.
-// ErrNotStarted is returned if the repo hasn't been cloned yet.
-// ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided.
-func (d *gitDirectory) Commit(ctx context.Context, authorName, authorEmail, msg string) error {
- // Make sure it's okay to write
- if err := d.verifyWrite(); err != nil {
- return err
- }
-
- s, err := d.wt.Status()
- if err != nil {
- return fmt.Errorf("git status failed: %v", err)
- }
- if s.IsClean() {
- log.Debugf("No changed files in git repo, nothing to commit...")
- return nil
- }
-
- // Do a commit and push
- log.Debug("commitLoop: Committing all local changes")
- hash, err := d.wt.Commit(msg, &git.CommitOptions{
- All: true,
- Author: &object.Signature{
- Name: authorName,
- Email: authorEmail,
- When: time.Now(),
- },
- })
- if err != nil {
- return fmt.Errorf("git commit error: %v", err)
- }
-
- // Perform the git push operation using the timeout
- err = d.contextWithTimeout(ctx, func(innerCtx context.Context) error {
- log.Debug("commitLoop: Will push with timeout")
- return d.repo.PushContext(innerCtx, &git.PushOptions{
- Auth: d.AuthMethod,
- })
- })
- // Handle errors
- switch err {
- case nil, git.NoErrAlreadyUpToDate:
- // no-op, just continue. Allow the git.NoErrAlreadyUpToDate error
- case context.DeadlineExceeded:
- return fmt.Errorf("git push operation took longer than deadline %s", d.Timeout)
- case context.Canceled:
- log.Tracef("context was cancelled")
- return nil // if Cleanup() was called, just exit the goroutine
- default:
- return fmt.Errorf("failed to push: %v", err)
- }
-
- // Notify upstream that we now have a new commit, and allow writing again
- log.Infof("A new commit with the actual state has been created and pushed to the origin: %q", hash)
- d.observeCommit(hash)
- return nil
-}
-
-func (d *gitDirectory) contextWithTimeout(ctx context.Context, fn func(context.Context) error) error {
- // Create a new context with a timeout. The push operation either succeeds in time, times out,
- // or is cancelled by Cleanup(). In case of a successful run, the context is always cancelled afterwards.
- ctx, cancel := context.WithTimeout(ctx, d.Timeout)
- defer cancel()
-
- // Run the function using the context and cancel directly afterwards
- fnErr := fn(ctx)
-
- // Return the context error, if any, first so deadline/cancel signals can propagate.
- // Otherwise passthrough the error returned from the function.
- if ctx.Err() != nil {
- log.Debugf("operation context yielded error %v to be returned. Function error was: %v", ctx.Err(), fnErr)
- return ctx.Err()
- }
- return fnErr
-}
-
-// Cleanup cancels running goroutines and operations, and removes the temporary clone directory
-func (d *gitDirectory) Cleanup() error {
- // Cancel the context for the two running goroutines, and any possible long-running operations
- d.cancel()
-
- // Remove the temporary directory
- if err := os.RemoveAll(d.Dir()); err != nil {
- log.Errorf("Failed to clean up temp git directory: %v", err)
- return err
- }
- return nil
-}
diff --git a/pkg/runtime/doc.go b/pkg/runtime/doc.go
deleted file mode 100644
index 4eb2a1ee..00000000
--- a/pkg/runtime/doc.go
+++ /dev/null
@@ -1,2 +0,0 @@
-// +k8s:deepcopy-gen=package
-package runtime
diff --git a/pkg/runtime/identifiers.go b/pkg/runtime/identifiers.go
deleted file mode 100644
index 87bc00e2..00000000
--- a/pkg/runtime/identifiers.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package runtime
-
-import (
- "fmt"
-
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// DefaultNamespace describes the default namespace name used for the system.
-const DefaultNamespace = "default"
-
-// Identifyable is an object which can be identified
-type Identifyable interface {
- // GetIdentifier can return e.g. a "namespace/name" combination, which is not guaranteed
- // to be unique world-wide, or alternatively a random SHA for instance
- GetIdentifier() string
-}
-
-type identifier string
-
-func (i identifier) GetIdentifier() string { return string(i) }
-
-type Metav1NameIdentifierFactory struct{}
-
-func (id Metav1NameIdentifierFactory) Identify(o interface{}) (Identifyable, bool) {
- switch obj := o.(type) {
- case metav1.Object:
- if len(obj.GetNamespace()) == 0 || len(obj.GetName()) == 0 {
- return nil, false
- }
- return NewIdentifier(fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName())), true
- }
- return nil, false
-}
-
-type ObjectUIDIdentifierFactory struct{}
-
-func (id ObjectUIDIdentifierFactory) Identify(o interface{}) (Identifyable, bool) {
- switch obj := o.(type) {
- case Object:
- if len(obj.GetUID()) == 0 {
- return nil, false
- }
- // TODO: Make sure that runtime.APIType works with this
- return NewIdentifier(string(obj.GetUID())), true
- }
- return nil, false
-}
-
-var (
- // Metav1Identifier identifies an object using its metav1.ObjectMeta Name and Namespace
- Metav1NameIdentifier IdentifierFactory = Metav1NameIdentifierFactory{}
- // ObjectUIDIdentifier identifies an object using its libgitops/pkg/runtime.ObjectMeta UID field
- ObjectUIDIdentifier IdentifierFactory = ObjectUIDIdentifierFactory{}
-)
-
-func NewIdentifier(str string) Identifyable {
- return identifier(str)
-}
-
-type IdentifierFactory interface {
- Identify(o interface{}) (id Identifyable, ok bool)
-}
diff --git a/pkg/runtime/meta.go b/pkg/runtime/meta.go
deleted file mode 100644
index 32930e18..00000000
--- a/pkg/runtime/meta.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package runtime
-
-import (
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/runtime"
- "sigs.k8s.io/yaml"
-)
-
-// PartialObjectImpl is a struct implementing PartialObject, used for
-// unmarshalling unknown objects into this intermediate type
-// where .Name, .UID, .Kind and .APIVersion become easily available
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-type PartialObjectImpl struct {
- metav1.TypeMeta `json:",inline"`
- metav1.ObjectMeta `json:"metadata"`
-}
-
-func (po *PartialObjectImpl) IsPartialObject() {}
-
-// This constructor ensures the PartialObjectImpl fields are not nil.
-// TODO: Make this multi-document-aware?
-func NewPartialObject(frame []byte) (PartialObject, error) {
- obj := &PartialObjectImpl{}
-
- // The yaml package supports both YAML and JSON. Don't use the serializer, as the APIType
- // wrapper is not registered in any scheme.
- if err := yaml.Unmarshal(frame, obj); err != nil {
- return nil, err
- }
-
- return obj, nil
-}
-
-var _ Object = &PartialObjectImpl{}
-var _ PartialObject = &PartialObjectImpl{}
-
-// Object is an union of the Object interfaces that are accessible for a
-// type that embeds both metav1.TypeMeta and metav1.ObjectMeta.
-type Object interface {
- runtime.Object
- metav1.ObjectMetaAccessor
- metav1.Object
-}
-
-// PartialObject is a partially-decoded object, where only metadata has been loaded.
-type PartialObject interface {
- Object
-
- // IsPartialObject is a dummy function for signalling that this is a partially-loaded object
- // i.e. only TypeMeta and ObjectMeta are stored in memory.
- IsPartialObject()
-}
diff --git a/pkg/runtime/zz_generated.deepcopy.go b/pkg/runtime/zz_generated.deepcopy.go
deleted file mode 100644
index 20beb72f..00000000
--- a/pkg/runtime/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,67 +0,0 @@
-// +build !ignore_autogenerated
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package runtime
-
-import (
- pkgruntime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Metav1NameIdentifierFactory) DeepCopyInto(out *Metav1NameIdentifierFactory) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metav1NameIdentifierFactory.
-func (in *Metav1NameIdentifierFactory) DeepCopy() *Metav1NameIdentifierFactory {
- if in == nil {
- return nil
- }
- out := new(Metav1NameIdentifierFactory)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ObjectUIDIdentifierFactory) DeepCopyInto(out *ObjectUIDIdentifierFactory) {
- *out = *in
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectUIDIdentifierFactory.
-func (in *ObjectUIDIdentifierFactory) DeepCopy() *ObjectUIDIdentifierFactory {
- if in == nil {
- return nil
- }
- out := new(ObjectUIDIdentifierFactory)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PartialObjectImpl) DeepCopyInto(out *PartialObjectImpl) {
- *out = *in
- out.TypeMeta = in.TypeMeta
- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
- return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectImpl.
-func (in *PartialObjectImpl) DeepCopy() *PartialObjectImpl {
- if in == nil {
- return nil
- }
- out := new(PartialObjectImpl)
- in.DeepCopyInto(out)
- return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new pkgruntime.Object.
-func (in *PartialObjectImpl) DeepCopyObject() pkgruntime.Object {
- if c := in.DeepCopy(); c != nil {
- return c
- }
- return nil
-}
diff --git a/pkg/serializer/comments.go b/pkg/serializer/comments.go
index 302c4db0..7ac6461c 100644
--- a/pkg/serializer/comments.go
+++ b/pkg/serializer/comments.go
@@ -2,12 +2,15 @@ package serializer
import (
"bytes"
+ "context"
"encoding/base64"
"errors"
"fmt"
"github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/serializer/comments"
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/frame"
+ "github.com/weaveworks/libgitops/pkg/frame/sanitize/comments"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"sigs.k8s.io/kustomize/kyaml/yaml"
@@ -24,10 +27,10 @@ var (
// tryToPreserveComments tries to save the original file data (base64-encoded) into an annotation.
// This original file data can be used at encoding-time to preserve comments
-func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct ContentType) {
+func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct content.ContentType) {
// If the user opted into preserving comments and the format is YAML, proceed
// If they didn't, return directly
- if !(*d.opts.PreserveComments && ct == ContentTypeYAML) {
+ if !(d.opts.PreserveComments == PreserveCommentsStrict && ct == content.ContentTypeYAML) {
return
}
@@ -39,15 +42,16 @@ func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct Conte
}
// tryToPreserveComments tries to locate the possibly-saved original file data in the object's annotation
-func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw FrameWriter, obj runtime.Object, metaObj metav1.Object) error {
+func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw frame.Writer, obj runtime.Object, metaObj metav1.Object) error {
+ ctx := context.TODO()
	// If the user did not opt into preserving comments, just sanitize ObjectMeta temporarily and return
- if !*e.opts.PreserveComments {
+ if e.opts.PreserveComments == PreserveCommentsDisable {
// Normal encoding without the annotation (so it doesn't leak by accident)
return noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, fw, obj))
}
// The user requested to preserve comments, but content type is not YAML, so log, sanitize and return
- if fw.ContentType() != ContentTypeYAML {
+ if fw.ContentType() != content.ContentTypeYAML {
logrus.Debugf("Asked to preserve comments, but ContentType is not YAML, so ignoring")
// Normal encoding without the annotation (so it doesn't leak by accident)
@@ -64,7 +68,7 @@ func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw Fr
// Encode the new object into a temporary buffer, it should not be written as the "final result" to the FrameWriter
buf := new(bytes.Buffer)
- if err := noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, NewYAMLFrameWriter(buf), obj)); err != nil {
+ if err := noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, frame.ToYAMLBuffer(buf), obj)); err != nil {
// fatal error
return err
}
@@ -78,20 +82,22 @@ func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw Fr
}
// Copy over comments from the old to the new schema
+ // TODO: Move over to use the frame Sanitizer flow
if err := comments.CopyComments(priorNode, afterNode, true); err != nil {
// fatal error
return err
}
// Print the new schema with the old comments kept to the FrameWriter
- _, err = fmt.Fprint(fw, afterNode.MustString())
+ _, err = fmt.Fprint(frame.ToIoWriteCloser(ctx, fw), afterNode.MustString())
// we're done, exit the encode function
return err
}
-func (e *encoder) normalEncodeFunc(versionEncoder runtime.Encoder, fw FrameWriter, obj runtime.Object) func() error {
+func (e *encoder) normalEncodeFunc(versionEncoder runtime.Encoder, fw frame.Writer, obj runtime.Object) func() error {
return func() error {
- return versionEncoder.Encode(obj, fw)
+ ctx := context.TODO()
+ return versionEncoder.Encode(obj, frame.ToIoWriteCloser(ctx, fw))
}
}
diff --git a/pkg/serializer/comments_test.go b/pkg/serializer/comments_test.go
index 8f4c65c2..7a03a25a 100644
--- a/pkg/serializer/comments_test.go
+++ b/pkg/serializer/comments_test.go
@@ -31,13 +31,13 @@ spec:
data:
- field # Inline comment
- another:
- subthing: "yes"
+ subthing: "yes"
thing:
# Head comment
var: true
status:
nested:
- fields:
+ fields: {}
# Just a comment
`
diff --git a/pkg/serializer/convertor.go b/pkg/serializer/convertor.go
index bdea096c..d51efd30 100644
--- a/pkg/serializer/convertor.go
+++ b/pkg/serializer/convertor.go
@@ -19,19 +19,25 @@ var (
errObjMustNotBeBoth = errors.New("given object must not implement both the Convertible and Hub interfaces")
)
-func newConverter(scheme *runtime.Scheme) *converter {
+func NewConverter(schemeLock LockedScheme) *converter {
return &converter{
- scheme: scheme,
- convertor: newObjectConvertor(scheme, true),
+ LockedScheme: schemeLock,
+ convertor: newObjectConvertor(schemeLock.Scheme(), true),
}
}
// converter implements the Converter interface
+// TODO: This implementation should support converting from a
+// convertible to another convertible through the Hub
type converter struct {
- scheme *runtime.Scheme
+ LockedScheme
convertor *objectConvertor
}
+func (c *converter) GetLockedScheme() LockedScheme {
+ return c.LockedScheme
+}
+
// Convert converts in directly into out. out should be an empty object of the destination type.
// Both objects must be of the same kind and either have autogenerated conversions registered, or
// be controller-runtime CRD-style implementers of the sigs.k8s.io/controller-runtime/pkg/conversion.Hub
@@ -46,7 +52,7 @@ func (c *converter) Convert(in, out runtime.Object) error {
// TODO: If needed, this function could only accept a GroupVersion, not GroupVersionKind
func (c *converter) ConvertIntoNew(in runtime.Object, gvk schema.GroupVersionKind) (runtime.Object, error) {
// Create a new object of the given gvk
- obj, err := c.scheme.New(gvk)
+ obj, err := c.Scheme().New(gvk)
if err != nil {
return nil, err
}
@@ -169,7 +175,8 @@ func (c *objectConvertor) ConvertToVersion(in runtime.Object, groupVersioner run
// as before, using the scheme's ConvertToVersion function. But if we don't want to convert the newly-decoded
// external object, we can just do nothing and the object will stay unconverted.
// doConversion is always true in the Encode codepath.
- if !c.doConversion {
+ // Also, never convert unknown, partial metadata or unstructured objects (defined as "non-convertible").
+ if !c.doConversion || IsNonConvertible(in) {
// DeepCopy the object to make sure that although in would be somehow modified, it doesn't affect out
return in.DeepCopyObject(), nil
}
diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go
index 4feff21f..7b4177da 100644
--- a/pkg/serializer/decode.go
+++ b/pkg/serializer/decode.go
@@ -1,123 +1,51 @@
package serializer
import (
+ "context"
"fmt"
"io"
"reflect"
- "github.com/weaveworks/libgitops/pkg/util"
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/frame"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
- "sigs.k8s.io/yaml"
)
// This is the groupversionkind for the v1.List object
var listGVK = metav1.Unversioned.WithKind("List")
-type DecodingOptions struct {
- // Not applicable for Decoder.DecodeInto(). If true, the decoded external object
- // will be converted into its hub (or internal, where applicable) representation. Otherwise, the decoded
- // object will be left in its external representation. (Default: false)
- ConvertToHub *bool
-
- // Parse the YAML/JSON in strict mode, returning a specific error if the input
- // contains duplicate or unknown fields or formatting errors. (Default: true)
- Strict *bool
-
- // Automatically default the decoded object. (Default: false)
- Default *bool
-
- // Only applicable for Decoder.DecodeAll(). If the underlying data contains a v1.List,
- // the items of the list will be traversed, decoded into their respective types, and
- // appended to the returned slice. The v1.List will in this case not be returned.
- // This conversion does NOT support preserving comments. If the given scheme doesn't
- // recognize the v1.List, before using it will be registered automatically. (Default: true)
- DecodeListElements *bool
-
- // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta.
- // Only applicable to ContentTypeYAML framers.
- // Using any other framer will be silently ignored. Usage of this option also requires setting
- // the PreserveComments in EncodingOptions, too. (Default: false)
- PreserveComments *bool
-
- // DecodeUnknown specifies whether decode objects with an unknown GroupVersionKind into a
- // *runtime.Unknown object when running Decode(All) (true value) or to return an error when
- // any unrecognized type is found (false value). (Default: false)
- DecodeUnknown *bool
-}
-
-type DecodingOptionsFunc func(*DecodingOptions)
-
-func WithConvertToHubDecode(convert bool) DecodingOptionsFunc {
- return func(opts *DecodingOptions) {
- opts.ConvertToHub = &convert
- }
-}
-
-func WithStrictDecode(strict bool) DecodingOptionsFunc {
- return func(opts *DecodingOptions) {
- opts.Strict = &strict
- }
-}
-
-func WithDefaultsDecode(defaults bool) DecodingOptionsFunc {
- return func(opts *DecodingOptions) {
- opts.Default = &defaults
- }
-}
-
-func WithListElementsDecoding(listElements bool) DecodingOptionsFunc {
- return func(opts *DecodingOptions) {
- opts.DecodeListElements = &listElements
- }
-}
+// TODO: Consider taking the DecodeOptions at Decode time instead, as a variadic
+// Option slice. That would probably require caching the *json.Serializer
+// and runtime.Decoder for each set of options used, though.
-func WithCommentsDecode(comments bool) DecodingOptionsFunc {
- return func(opts *DecodingOptions) {
- opts.PreserveComments = &comments
- }
-}
+func NewDecoder(schemeLock LockedScheme, opts ...DecodeOption) Decoder {
+ // Make the options struct
+ o := *defaultDecodeOpts().ApplyOptions(opts)
-func WithUnknownDecode(unknown bool) DecodingOptionsFunc {
- return func(opts *DecodingOptions) {
- opts.DecodeUnknown = &unknown
- }
-}
+ // Allow both YAML and JSON inputs (JSON is a subset of YAML), and deserialize in strict mode
+ s := json.NewSerializerWithOptions(json.DefaultMetaFactory, schemeLock.Scheme(), schemeLock.Scheme(), json.SerializerOptions{
+ Yaml: true,
+ Strict: *o.Strict,
+ })
-func WithDecodingOptions(newOpts DecodingOptions) DecodingOptionsFunc {
- return func(opts *DecodingOptions) {
- // TODO: Null-check all of these before using them
- *opts = newOpts
- }
-}
+ decodeCodec := decoderForVersion(schemeLock.Scheme(), s, *o.Default, *o.ConvertToHub)
-func defaultDecodeOpts() *DecodingOptions {
- return &DecodingOptions{
- ConvertToHub: util.BoolPtr(false),
- Strict: util.BoolPtr(true),
- Default: util.BoolPtr(false),
- DecodeListElements: util.BoolPtr(true),
- PreserveComments: util.BoolPtr(false),
- DecodeUnknown: util.BoolPtr(false),
- }
-}
-
-func newDecodeOpts(fns ...DecodingOptionsFunc) *DecodingOptions {
- opts := defaultDecodeOpts()
- for _, fn := range fns {
- fn(opts)
- }
- return opts
+ return &decoder{schemeLock, decodeCodec, o}
}
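+
+// A hedged usage sketch (assumes a LockedScheme wrapping a populated
+// *runtime.Scheme; "config.yaml" is a hypothetical input file):
+//
+//	dec := NewDecoder(schemeLock)
+//	obj, err := dec.Decode(frame.FromYAMLFile("config.yaml"))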
type decoder struct {
- *schemeAndCodec
+ LockedScheme
decoder runtime.Decoder
- opts DecodingOptions
+ opts DecodeOptions
+}
+
+func (d *decoder) GetLockedScheme() LockedScheme {
+ return d.LockedScheme
}
// Decode returns the decoded object from the next document in the FrameReader stream.
@@ -136,21 +64,28 @@ type decoder struct {
// If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a
// *runtime.Unknown object instead of returning a UnrecognizedTypeError.
// opts.DecodeListElements is not applicable in this call.
-func (d *decoder) Decode(fr FrameReader) (runtime.Object, error) {
+func (d *decoder) Decode(fr frame.Reader) (runtime.Object, error) {
// Read a frame from the FrameReader
// TODO: Make sure to test the case when doc might contain something, and err is io.EOF
- doc, err := fr.ReadFrame()
+ ctx := context.TODO()
+ doc, err := fr.ReadFrame(ctx)
if err != nil {
return nil, err
}
return d.decode(doc, nil, fr.ContentType())
}
-func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runtime.Object, error) {
+func (d *decoder) decode(doc []byte, into runtime.Object, ct content.ContentType) (runtime.Object, error) {
// If the scheme doesn't recognize a v1.List, and we enabled opts.DecodeListElements,
// make the scheme able to decode the v1.List automatically
- if *d.opts.DecodeListElements && !d.scheme.Recognizes(listGVK) {
- d.scheme.AddKnownTypes(metav1.Unversioned, &metav1.List{})
+ if *d.opts.DecodeListElements {
+		// As .AddKnownTypes writes to the scheme, guard both the check and the write
+		// with a mutex.
+ d.SchemeLock()
+ if !d.Scheme().Recognizes(listGVK) {
+ d.Scheme().AddKnownTypes(metav1.Unversioned, &metav1.List{})
+ }
+ d.SchemeUnlock()
}
// Record if this decode call should have runtime.DecodeInto-functionality
@@ -169,7 +104,7 @@ func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runti
// Give the user good errors wrt missing group & version
// TODO: It might be unnecessary to unmarshal twice (as we do in handleDecodeError),
// as gvk was returned from Decode above.
- return nil, d.handleDecodeError(doc, err)
+ return nil, d.handleDecodeError(gvk, err)
}
// Fail fast if object is nil
@@ -207,10 +142,14 @@ func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runti
// opts.DecodeUnknown is not applicable in this call. In case you want to decode an object into a
// *runtime.Unknown, just create a runtime.Unknown object and pass the pointer as obj into DecodeInto
// and it'll work.
-func (d *decoder) DecodeInto(fr FrameReader, into runtime.Object) error {
+//
+// TODO: Support decoding all frames at once into e.g. PartialMetadataLists, UnstructuredLists, or
+// metav1.Lists.
+func (d *decoder) DecodeInto(fr frame.Reader, into runtime.Object) error {
// Read a frame from the FrameReader.
// TODO: Make sure to test the case when doc might contain something, and err is io.EOF
- doc, err := fr.ReadFrame()
+ ctx := context.TODO()
+ doc, err := fr.ReadFrame(ctx)
if err != nil {
return err
}
@@ -235,7 +174,7 @@ func (d *decoder) DecodeInto(fr FrameReader, into runtime.Object) error {
// added into the returning slice. The v1.List will in this case not be returned.
// If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a
// *runtime.Unknown object instead of returning a UnrecognizedTypeError.
-func (d *decoder) DecodeAll(fr FrameReader) ([]runtime.Object, error) {
+func (d *decoder) DecodeAll(fr frame.Reader) ([]runtime.Object, error) {
objs := []runtime.Object{}
for {
obj, err := d.Decode(fr)
@@ -258,7 +197,7 @@ func (d *decoder) DecodeAll(fr FrameReader) ([]runtime.Object, error) {
}
// decodeUnknown decodes bytes of a certain content type into a returned *runtime.Unknown object
-func (d *decoder) decodeUnknown(doc []byte, ct ContentType) (runtime.Object, error) {
+func (d *decoder) decodeUnknown(doc []byte, ct content.ContentType) (runtime.Object, error) {
// Do a DecodeInto the new pointer to the object we've got. The resulting into object is
// also returned.
// The content type isn't really used here, as runtime.Unknown will never implement
@@ -266,28 +205,22 @@ func (d *decoder) decodeUnknown(doc []byte, ct ContentType) (runtime.Object, err
return d.decode(doc, &runtime.Unknown{}, ct)
}
-func (d *decoder) handleDecodeError(doc []byte, origErr error) error {
- // Parse the document's TypeMeta information
- gvk, err := extractYAMLTypeMeta(doc)
- if err != nil {
- return fmt.Errorf("failed to interpret TypeMeta from the given the YAML: %v. Decode error was: %w", err, origErr)
- }
-
+func (d *decoder) handleDecodeError(gvk *schema.GroupVersionKind, origErr error) error {
// TODO: Unit test that typed errors are returned properly
-
+ // TODO: Check for gvk == nil here?
// Check if the group was known. If not, return that specific error
- if !d.scheme.IsGroupRegistered(gvk.Group) {
+ if !d.Scheme().IsGroupRegistered(gvk.Group) {
return NewUnrecognizedGroupError(*gvk, origErr)
}
// Return a structured error if the group was registered with the scheme but the version was unrecognized
- if !d.scheme.IsVersionRegistered(gvk.GroupVersion()) {
- gvs := d.scheme.PrioritizedVersionsForGroup(gvk.Group)
+ if !d.Scheme().IsVersionRegistered(gvk.GroupVersion()) {
+ gvs := d.Scheme().PrioritizedVersionsForGroup(gvk.Group)
return NewUnrecognizedVersionError(gvs, *gvk, origErr)
}
// Return a structured error if the kind is not known
- if !d.scheme.Recognizes(*gvk) {
+ if !d.Scheme().Recognizes(*gvk) {
return NewUnrecognizedKindError(*gvk, origErr)
}
@@ -295,7 +228,7 @@ func (d *decoder) handleDecodeError(doc []byte, origErr error) error {
return origErr
}
-func (d *decoder) extractNestedObjects(obj runtime.Object, ct ContentType) ([]runtime.Object, error) {
+func (d *decoder) extractNestedObjects(obj runtime.Object, ct content.ContentType) ([]runtime.Object, error) {
// If we didn't ask for list-unwrapping functionality, return directly
if !*d.opts.DecodeListElements {
return []runtime.Object{obj}, nil
@@ -320,18 +253,6 @@ func (d *decoder) extractNestedObjects(obj runtime.Object, ct ContentType) ([]ru
return objs, nil
}
-func newDecoder(schemeAndCodec *schemeAndCodec, opts DecodingOptions) Decoder {
- // Allow both YAML and JSON inputs (JSON is a subset of YAML), and deserialize in strict mode
- s := json.NewSerializerWithOptions(json.DefaultMetaFactory, schemeAndCodec.scheme, schemeAndCodec.scheme, json.SerializerOptions{
- Yaml: true,
- Strict: *opts.Strict,
- })
-
- decodeCodec := decoderForVersion(schemeAndCodec.scheme, s, *opts.Default, *opts.ConvertToHub)
-
- return &decoder{schemeAndCodec, decodeCodec, opts}
-}
-
// decoderForVersion is used instead of CodecFactory.DecoderForVersion, as we want to use our own converter
func decoderForVersion(scheme *runtime.Scheme, decoder *json.Serializer, doDefaulting, doConversion bool) runtime.Decoder {
return newConversionCodecForScheme(
@@ -361,20 +282,38 @@ func newConversionCodecForScheme(
defaulter = scheme
}
convertor := newObjectConvertor(scheme, performConversion)
- return versioning.NewCodec(encoder, decoder, convertor, scheme, scheme, defaulter, encodeVersion, decodeVersion, scheme.Name())
+ // a typer that recognizes metav1.PartialObjectMetadata{,List}
+ typer := &customTyper{scheme}
+ return versioning.NewCodec(encoder, decoder, convertor, scheme, typer, defaulter, encodeVersion, decodeVersion, scheme.Name())
}
-// TODO: Use https://github.com/kubernetes/apimachinery/blob/master/pkg/runtime/serializer/yaml/meta.go
-// when we can assume everyone is vendoring k8s v1.19
-func extractYAMLTypeMeta(data []byte) (*schema.GroupVersionKind, error) {
- typeMeta := runtime.TypeMeta{}
- if err := yaml.Unmarshal(data, &typeMeta); err != nil {
- return nil, fmt.Errorf("could not interpret GroupVersionKind: %w", err)
- }
- gv, err := schema.ParseGroupVersion(typeMeta.APIVersion)
- if err != nil {
- return nil, err
+var _ runtime.ObjectTyper = &customTyper{}
+
+type customTyper struct {
+ scheme *runtime.Scheme
+}
+
+// ObjectKinds is an extension of the native Scheme.ObjectKinds function that also
+// recognizes partial metadata objects and lists. The logic here closely follows the
+// scheme's own logic.
+func (t *customTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) {
+	// Partial objects are always fine to encode/decode as-is when the GVK is set.
+	// Similar code exists in runtime.Scheme.ObjectKinds for reference.
+ if IsPartialObject(obj) || IsPartialObjectList(obj) {
+ // we require that the GVK be populated in order to recognize the object
+ gvk := obj.GetObjectKind().GroupVersionKind()
+ if len(gvk.Kind) == 0 {
+ return nil, false, runtime.NewMissingKindErr("unstructured object has no kind")
+ }
+ if len(gvk.Version) == 0 {
+ return nil, false, runtime.NewMissingVersionErr("unstructured object has no version")
+ }
+ return []schema.GroupVersionKind{gvk}, false, nil
}
- gvk := gv.WithKind(typeMeta.Kind)
- return &gvk, nil
+ return t.scheme.ObjectKinds(obj)
+}
+
+// Recognizes just calls the underlying Scheme.Recognizes
+func (t *customTyper) Recognizes(gvk schema.GroupVersionKind) bool {
+ return t.scheme.Recognizes(gvk)
}
diff --git a/pkg/serializer/defaulter.go b/pkg/serializer/defaulter.go
index 6ff0ad8e..2b96ab44 100644
--- a/pkg/serializer/defaulter.go
+++ b/pkg/serializer/defaulter.go
@@ -6,19 +6,25 @@ import (
"k8s.io/apimachinery/pkg/util/errors"
)
-func newDefaulter(scheme *runtime.Scheme) *defaulter {
- return &defaulter{scheme}
+func NewDefaulter(schemeLock LockedScheme) Defaulter {
+ // We do not write to the scheme in the defaulter at this time.
+ // If we start doing that, we must also make use of the locker
+ return &defaulter{schemeLock}
}
type defaulter struct {
- scheme *runtime.Scheme
+ LockedScheme
+}
+
+func (d *defaulter) GetLockedScheme() LockedScheme {
+ return d.LockedScheme
}
// NewDefaultedObject returns a new, defaulted object. It is essentially scheme.New() and
// scheme.Default(obj), but with extra logic to also cover internal versions.
// Important to note here is that the TypeMeta information is NOT applied automatically.
func (d *defaulter) NewDefaultedObject(gvk schema.GroupVersionKind) (runtime.Object, error) {
- obj, err := d.scheme.New(gvk)
+ obj, err := d.Scheme().New(gvk)
if err != nil {
return nil, err
}
@@ -41,36 +47,36 @@ func (d *defaulter) Default(objs ...runtime.Object) error {
func (d *defaulter) runDefaulting(obj runtime.Object) error {
// First, get the groupversionkind of the object
- gvk, err := GVKForObject(d.scheme, obj)
+ gvk, err := GVKForObject(d.Scheme(), obj)
if err != nil {
return err
}
// If the version is external, just default it and return.
if gvk.Version != runtime.APIVersionInternal {
- d.scheme.Default(obj)
+ d.Scheme().Default(obj)
return nil
}
// We know that the current object is internal
// Get the preferred external version...
- gv, err := prioritizedVersionForGroup(d.scheme, gvk.Group)
+ gv, err := PreferredVersionForGroup(d.Scheme(), gvk.Group)
if err != nil {
return err
}
// ...and make a new object of it
- external, err := d.scheme.New(gv.WithKind(gvk.Kind))
+ external, err := d.Scheme().New(gv.WithKind(gvk.Kind))
if err != nil {
return err
}
// Convert the internal object to the external
- if err := d.scheme.Convert(obj, external, nil); err != nil {
+ if err := d.Scheme().Convert(obj, external, nil); err != nil {
return err
}
// Default the external
- d.scheme.Default(external)
+ d.Scheme().Default(external)
// And convert back to internal
- return d.scheme.Convert(external, obj, nil)
+ return d.Scheme().Convert(external, obj, nil)
}
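To make the defaulting round-trip above concrete, here is a compilable sketch of the same flow as a free function over *runtime.Scheme. The function name and error handling are illustrative, not part of this package; the point is that defaulting functions are registered per external version, so internal objects must be converted out and back.

```go
package defaulting

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// DefaultInternal defaults an internal-version object by converting it to the
// preferred external version, defaulting there, and converting back.
func DefaultInternal(scheme *runtime.Scheme, obj runtime.Object, group, kind string) error {
	// Pick the preferred (first prioritized) external version for the group.
	gvs := scheme.PrioritizedVersionsForGroup(group)
	if len(gvs) == 0 {
		return fmt.Errorf("no versions registered for group %q", group)
	}
	external, err := scheme.New(gvs[0].WithKind(kind))
	if err != nil {
		return err
	}
	// Internal -> external, default there, then external -> internal.
	if err := scheme.Convert(obj, external, nil); err != nil {
		return err
	}
	scheme.Default(external)
	return scheme.Convert(external, obj, nil)
}
```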
diff --git a/pkg/serializer/encode.go b/pkg/serializer/encode.go
index 77061932..75a762c4 100644
--- a/pkg/serializer/encode.go
+++ b/pkg/serializer/encode.go
@@ -1,73 +1,40 @@
package serializer
import (
- "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/util"
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+ "strings"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/frame"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
+ k8sserializer "k8s.io/apimachinery/pkg/runtime/serializer"
)
-type EncodingOptions struct {
- // Use pretty printing when writing to the output. (Default: true)
- // TODO: Fix that sometimes omitempty fields aren't respected
- Pretty *bool
- // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta.
- // Only applicable to ContentTypeYAML framers.
- // Using any other framer will be silently ignored. Usage of this option also requires setting
- // the PreserveComments in DecodingOptions, too. (Default: false)
- // TODO: Make this a BestEffort & Strict mode
- PreserveComments *bool
-
- // TODO: Maybe consider an option to always convert to the preferred version (not just internal)
-}
-
-type EncodingOptionsFunc func(*EncodingOptions)
-
-func WithPrettyEncode(pretty bool) EncodingOptionsFunc {
- return func(opts *EncodingOptions) {
- opts.Pretty = &pretty
- }
-}
-
-func WithCommentsEncode(comments bool) EncodingOptionsFunc {
- return func(opts *EncodingOptions) {
- opts.PreserveComments = &comments
+func NewEncoder(schemeLock LockedScheme, codecs *k8sserializer.CodecFactory, opts ...EncodeOption) Encoder {
+ return &encoder{
+ LockedScheme: schemeLock,
+ codecs: codecs,
+ opts: *defaultEncodeOpts().ApplyOptions(opts),
}
}
-func WithEncodingOptions(newOpts EncodingOptions) EncodingOptionsFunc {
- return func(opts *EncodingOptions) {
- // TODO: Null-check all of these before using them
- *opts = newOpts
- }
-}
+type encoder struct {
+ LockedScheme
+ codecs *k8sserializer.CodecFactory
-func defaultEncodeOpts() *EncodingOptions {
- return &EncodingOptions{
- Pretty: util.BoolPtr(true),
- PreserveComments: util.BoolPtr(false),
- }
+ opts EncodeOptions
}
-func newEncodeOpts(fns ...EncodingOptionsFunc) *EncodingOptions {
- opts := defaultEncodeOpts()
- for _, fn := range fns {
- fn(opts)
- }
- return opts
+func (e *encoder) GetLockedScheme() LockedScheme {
+ return e.LockedScheme
}
-type encoder struct {
- *schemeAndCodec
-
- opts EncodingOptions
-}
-
-func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodingOptions) Encoder {
- return &encoder{
- schemeAndCodec,
- opts,
- }
+func (e *encoder) CodecFactory() *k8sserializer.CodecFactory {
+ return e.codecs
}
// Encode encodes the given objects and writes them to the specified FrameWriter.
@@ -75,17 +42,18 @@ func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodingOptions) Encoder {
// internal object given to the preferred external groupversion. No conversion will happen
// if the given object is of an external version.
// TODO: This should automatically convert to the preferred version
-func (e *encoder) Encode(fw FrameWriter, objs ...runtime.Object) error {
+// TODO: Fix that omitempty fields sometimes aren't respected
+func (e *encoder) Encode(fw frame.Writer, objs ...runtime.Object) error {
for _, obj := range objs {
// Get the kind for the given object
- gvk, err := GVKForObject(e.scheme, obj)
+ gvk, err := GVKForObject(e.Scheme(), obj)
if err != nil {
return err
}
// If the object is internal, convert it to the preferred external one
if gvk.Version == runtime.APIVersionInternal {
- gv, err := prioritizedVersionForGroup(e.scheme, gvk.Group)
+ gv, err := PreferredVersionForGroup(e.Scheme(), gvk.Group)
if err != nil {
return err
}
@@ -103,36 +71,40 @@ func (e *encoder) Encode(fw FrameWriter, objs ...runtime.Object) error {
// EncodeForGroupVersion encodes the given object for the specific groupversion. If the object
// is not of that version currently it will try to convert. The output bytes are written to the
// FrameWriter. The FrameWriter specifies the ContentType.
-func (e *encoder) EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv schema.GroupVersion) error {
+func (e *encoder) EncodeForGroupVersion(fw frame.Writer, obj runtime.Object, gv schema.GroupVersion) error {
// Get the serializer for the media type
serializerInfo, ok := runtime.SerializerInfoForMediaType(e.codecs.SupportedMediaTypes(), string(fw.ContentType()))
if !ok {
- return ErrUnsupportedContentType
+ return content.ErrUnsupportedContentType(fw.ContentType()) // TODO: Say what content types are supported
}
- // Choose the pretty or non-pretty one
+ // Choose the default, non-pretty serializer; we prettify the output later if needed.
+ // We technically could use the JSON PrettySerializer here, but it does not catch the
+ // cases where the JSON iterator invokes MarshalJSON() on an object, and that object
+ // returns non-pretty bytes (e.g. *unstructured.Unstructured). Hence, it is more robust
+ // and extensible to always use the non-pretty serializer, and only indent the output
+ // by the requested number of spaces after JSON encoding.
encoder := serializerInfo.Serializer
- // Use the pretty serializer if it was asked for and is defined for the content type
- if *e.opts.Pretty {
- // Apparently not all SerializerInfos have this field defined (e.g. YAML)
- // TODO: This could be considered a bug in upstream, create an issue
- if serializerInfo.PrettySerializer != nil {
- encoder = serializerInfo.PrettySerializer
- } else {
- logrus.Debugf("PrettySerializer for ContentType %s is nil, falling back to Serializer.", fw.ContentType())
- }
- }
-
// Get a version-specific encoder for the specified groupversion
- versionEncoder := encoderForVersion(e.scheme, encoder, gv)
+ versionEncoder := encoderForVersion(e.Scheme(), encoder, gv)
+
+ ctx := context.TODO()
+ wc := frame.ToIoWriteCloser(ctx, fw)
+
+ // Check if the user requested prettified JSON output.
+ // If the ContentType is JSON this is OK; we will indent the encoded output on the fly.
+ if *e.opts.JSONIndent > 0 && fw.ContentType() == content.ContentTypeJSON {
+ wc = &jsonPrettyWriter{indent: *e.opts.JSONIndent, wc: wc}
+ }
// Cast the object to a metav1.Object to get access to annotations
metaobj, ok := toMetaObject(obj)
// For objects without ObjectMeta, the cast will fail. Allow that failure and do "normal" encoding
if !ok {
- return versionEncoder.Encode(obj, fw)
+ return versionEncoder.Encode(obj, wc)
}
+ // TODO: Document that the frame.Writer is not closed
// Specialize the encoder for a specific gv and encode the object
return e.encodeWithCommentSupport(versionEncoder, fw, obj, metaobj)
@@ -150,3 +122,24 @@ func encoderForVersion(scheme *runtime.Scheme, encoder runtime.Encoder, gv schem
true, // convert if needed before encode
)
}
+
+type jsonPrettyWriter struct {
+ indent int32
+ wc io.WriteCloser
+}
+
+func (w *jsonPrettyWriter) Write(p []byte) (n int, err error) {
+ // Indent the source bytes
+ var indented bytes.Buffer
+ err = json.Indent(&indented, p, "", strings.Repeat(" ", int(w.indent)))
+ if err != nil {
+ return 0, err
+ }
+ // Write the pretty bytes to the underlying writer
+ if _, err = w.wc.Write(indented.Bytes()); err != nil {
+ return 0, err
+ }
+ // Report len(p) consumed to satisfy the io.Writer contract; the indented
+ // bytes written downstream are longer than the input p.
+ return len(p), nil
+}
+
+func (w *jsonPrettyWriter) Close() error {
+ return w.wc.Close()
+}
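As a usage illustration, the sketch below exercises the same wrap-and-indent approach in isolation: compact JSON written through the wrapper comes out indented. Like jsonPrettyWriter above, it assumes each Write call delivers exactly one complete JSON frame, since json.Indent needs a whole document.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strings"
)

// indentWriter mirrors jsonPrettyWriter: it indents one whole JSON document per Write.
type indentWriter struct {
	indent int
	out    *bytes.Buffer
}

func (w *indentWriter) Write(p []byte) (int, error) {
	var pretty bytes.Buffer
	if err := json.Indent(&pretty, p, "", strings.Repeat(" ", w.indent)); err != nil {
		return 0, err
	}
	if _, err := w.out.Write(pretty.Bytes()); err != nil {
		return 0, err
	}
	// Report len(p) to satisfy the io.Writer contract.
	return len(p), nil
}

func main() {
	var buf bytes.Buffer
	w := &indentWriter{indent: 2, out: &buf}
	// Compact encoder output becomes pretty on the way through.
	fmt.Fprint(w, `{"kind":"Car","apiVersion":"sample/v1alpha1"}`)
	fmt.Println(buf.String())
}
```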
diff --git a/pkg/serializer/error_structs.go b/pkg/serializer/error_structs.go
deleted file mode 100644
index 11109b37..00000000
--- a/pkg/serializer/error_structs.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package serializer
-
-var _ ReadCloser = &errReadCloser{}
-
-type errReadCloser struct {
- err error
-}
-
-func (rc *errReadCloser) Read(p []byte) (n int, err error) {
- err = rc.err
- return
-}
-
-func (rc *errReadCloser) Close() error {
- return nil
-}
-
-var _ FrameReader = &errFrameReader{}
-
-type errFrameReader struct {
- err error
- contentType ContentType
-}
-
-func (fr *errFrameReader) ReadFrame() ([]byte, error) {
- return nil, fr.err
-}
-
-func (fr *errFrameReader) ContentType() ContentType {
- return fr.contentType
-}
-
-// Close implements io.Closer and closes the underlying ReadCloser
-func (fr *errFrameReader) Close() error {
- return nil
-}
-
-var _ FrameWriter = &errFrameWriter{}
-
-type errFrameWriter struct {
- err error
- contentType ContentType
-}
-
-func (fw *errFrameWriter) Write(_ []byte) (n int, err error) {
- err = fw.err
- return
-}
-
-func (fw *errFrameWriter) ContentType() ContentType {
- return fw.contentType
-}
diff --git a/pkg/serializer/frame_reader.go b/pkg/serializer/frame_reader.go
deleted file mode 100644
index 26ead8d2..00000000
--- a/pkg/serializer/frame_reader.go
+++ /dev/null
@@ -1,168 +0,0 @@
-package serializer
-
-import (
- "bytes"
- "errors"
- "io"
- "io/ioutil"
- "os"
-
- "k8s.io/apimachinery/pkg/runtime/serializer/json"
-)
-
-const (
- defaultBufSize = 64 * 1024 // 64 kB
- defaultMaxFrameSize = 16 * 1024 * 1024 // 16 MB
-)
-
-var (
- // FrameOverflowErr is returned from FrameReader.ReadFrame when one frame exceeds the
- // maximum size of 16 MB.
- FrameOverflowErr = errors.New("frame was larger than maximum allowed size")
-)
-
-// ReadCloser in this package is an alias for io.ReadCloser. It helps in Godoc to locate
-// helpers in this package which returns writers (i.e. FromFile and FromBytes)
-type ReadCloser io.ReadCloser
-
-// FrameReader is a content-type specific reader of a given ReadCloser.
-// The FrameReader reads frames from the underlying ReadCloser and returns them for consumption.
-// When io.EOF is reached, the stream is closed automatically.
-type FrameReader interface {
- ContentTyped
- io.Closer
-
- // ReadFrame reads frames from the underlying ReadCloser and returns them for consumption.
- // When io.EOF is reached, the stream is closed automatically.
- ReadFrame() ([]byte, error)
-}
-
-// NewFrameReader returns a FrameReader for the given ContentType and data in the
-// ReadCloser. The Reader is automatically closed in io.EOF. ReadFrame is called
-// once each Decoder.Decode() or Decoder.DecodeInto() call. When Decoder.DecodeAll() is
-// called, the FrameReader is read until io.EOF, upon where it is closed.
-func NewFrameReader(contentType ContentType, rc ReadCloser) FrameReader {
- switch contentType {
- case ContentTypeYAML:
- return newFrameReader(json.YAMLFramer.NewFrameReader(rc), contentType)
- case ContentTypeJSON:
- return newFrameReader(json.Framer.NewFrameReader(rc), contentType)
- default:
- return &errFrameReader{ErrUnsupportedContentType, contentType}
- }
-}
-
-// NewYAMLFrameReader returns a FrameReader that supports both YAML and JSON. Frames are separated by "---\n"
-//
-// This call is the same as NewFrameReader(ContentTypeYAML, rc)
-func NewYAMLFrameReader(rc ReadCloser) FrameReader {
- return NewFrameReader(ContentTypeYAML, rc)
-}
-
-// NewJSONFrameReader returns a FrameReader that supports both JSON. Objects are read from the stream one-by-one,
-// each object making up its own frame.
-//
-// This call is the same as NewFrameReader(ContentTypeJSON, rc)
-func NewJSONFrameReader(rc ReadCloser) FrameReader {
- return NewFrameReader(ContentTypeJSON, rc)
-}
-
-// newFrameReader returns a new instance of the frameReader struct
-func newFrameReader(rc io.ReadCloser, contentType ContentType) *frameReader {
- return &frameReader{
- rc: rc,
- bufSize: defaultBufSize,
- maxFrameSize: defaultMaxFrameSize,
- contentType: contentType,
- }
-}
-
-// frameReader is a FrameReader implementation
-type frameReader struct {
- rc io.ReadCloser
- bufSize int
- maxFrameSize int
- contentType ContentType
-
- // TODO: Maybe add mutexes for thread-safety (so no two goroutines read at the same time)
-}
-
-// ReadFrame reads one frame from the underlying io.Reader. ReadFrame
-// keeps on reading from the Reader in bufSize blocks, until the Reader either
-// returns err == nil or EOF. If the Reader reports an ErrShortBuffer error,
-// ReadFrame keeps on reading using new calls. ReadFrame might return both data and
-// io.EOF. io.EOF will be returned in the final call.
-func (rf *frameReader) ReadFrame() (frame []byte, err error) {
- // Temporary buffer to parts of a frame into
- var buf []byte
- // How many bytes were read by the read call
- var n int
- // Multiplier for bufsize
- c := 1
- for {
- // Allocate a buffer of a multiple of bufSize.
- buf = make([]byte, c*rf.bufSize)
- // Call the underlying reader.
- n, err = rf.rc.Read(buf)
- // Append the returned bytes to the b slice returned
- // If n is 0, this call is a no-op
- frame = append(frame, buf[:n]...)
-
- // If the frame got bigger than the max allowed size, return and report the error
- if len(frame) > rf.maxFrameSize {
- err = FrameOverflowErr
- return
- }
-
- // Handle different kinds of errors
- switch err {
- case io.ErrShortBuffer:
- // ignore the "buffer too short" error, and just keep on reading, now doubling the buffer
- c *= 2
- continue
- case nil:
- // One document is "done reading", we should return it if valid
- // Only return non-empty documents, i.e. skip e.g. leading `---`
- if len(bytes.TrimSpace(frame)) > 0 {
- // valid non-empty document
- return
- }
- // The document was empty, reset the frame (just to be sure) and continue
- frame = nil
- continue
- case io.EOF:
- // we reached the end of the file, close the reader and return
- rf.rc.Close()
- return
- default:
- // unknown error, return it immediately
- // TODO: Maybe return the error here?
- return
- }
- }
-}
-
-// ContentType returns the content type for the given FrameReader
-func (rf *frameReader) ContentType() ContentType {
- return rf.contentType
-}
-
-// Close implements io.Closer and closes the underlying ReadCloser
-func (rf *frameReader) Close() error {
- return rf.rc.Close()
-}
-
-// FromFile returns a ReadCloser from the given file, or a ReadCloser which returns
-// the given file open error when read.
-func FromFile(filePath string) ReadCloser {
- f, err := os.Open(filePath)
- if err != nil {
- return &errReadCloser{err}
- }
- return f
-}
-
-// FromBytes returns a ReadCloser from the given byte content.
-func FromBytes(content []byte) ReadCloser {
- return ioutil.NopCloser(bytes.NewReader(content))
-}
diff --git a/pkg/serializer/frame_reader_test.go b/pkg/serializer/frame_reader_test.go
deleted file mode 100644
index a696ed7d..00000000
--- a/pkg/serializer/frame_reader_test.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package serializer
-
-import (
- "io"
- "io/ioutil"
- "reflect"
- "strings"
- "testing"
-
- "k8s.io/apimachinery/pkg/runtime/serializer/json"
-)
-
-const (
- fooYAML = `kind: Foo
-apiVersion: bar/v1
-a: b1234567890
-c: d1234567890
-e: f1234567890
-hello: true`
-
- barYAML = `kind: Bar
-apiVersion: foo/v1
-a: b1234567890
-c: d1234567890
-e: f1234567890
-hello: false`
-
- bazYAML = `baz: true`
-
- testYAML = "\n---\n" + fooYAML + "\n---\n" + barYAML + "\n---\n" + bazYAML
-)
-
-func Test_FrameReader_ReadFrame(t *testing.T) {
- testYAMLReadCloser := json.YAMLFramer.NewFrameReader(ioutil.NopCloser(strings.NewReader(testYAML)))
-
- type fields struct {
- rc io.ReadCloser
- bufSize int
- maxFrameSize int
- }
- type result struct {
- wantB []byte
- wantErr bool
- }
- tests := []struct {
- name string
- fields fields
- wants []result
- }{
- {
- name: "three-document YAML case",
- fields: fields{
- rc: testYAMLReadCloser,
- bufSize: 16,
- maxFrameSize: 1024,
- },
- wants: []result{
- {
- wantB: []byte(fooYAML),
- wantErr: false,
- },
- {
- wantB: []byte(barYAML),
- wantErr: false,
- },
- {
- wantB: []byte(bazYAML),
- wantErr: false,
- },
- {
- wantB: nil,
- wantErr: true,
- },
- },
- },
- {
- name: "maximum size reached",
- fields: fields{
- rc: testYAMLReadCloser,
- bufSize: 16,
- maxFrameSize: 32,
- },
- wants: []result{
- {
- wantB: nil,
- wantErr: true,
- },
- },
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- rf := &frameReader{
- rc: tt.fields.rc,
- bufSize: tt.fields.bufSize,
- maxFrameSize: tt.fields.maxFrameSize,
- }
- for _, expected := range tt.wants {
- gotB, err := rf.ReadFrame()
- if (err != nil) != expected.wantErr {
- t.Errorf("frameReader.ReadFrame() error = %v, wantErr %v", err, expected.wantErr)
- return
- }
- if len(gotB) < len(expected.wantB) {
- t.Errorf("frameReader.ReadFrame(): got smaller slice %v than expected %v", gotB, expected.wantB)
- return
- }
- if !reflect.DeepEqual(gotB[:len(expected.wantB)], expected.wantB) {
- t.Errorf("frameReader.ReadFrame() = %v, want %v", gotB, expected.wantB)
- }
- }
- })
- }
-}
diff --git a/pkg/serializer/frame_utils.go b/pkg/serializer/frame_utils.go
deleted file mode 100644
index 12c65e16..00000000
--- a/pkg/serializer/frame_utils.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package serializer
-
-import "io"
-
-// FrameList is a list of frames (byte arrays), used for convenience functions
-type FrameList [][]byte
-
-// ReadFrameList is a convenience method that reads all available frames from the FrameReader
-// into a returned FrameList
-func ReadFrameList(fr FrameReader) (FrameList, error) {
- // TODO: Create an unit test for this function
- var frameList [][]byte
- for {
- // Read until we get io.EOF or an error
- frame, err := fr.ReadFrame()
- if err == io.EOF {
- break
- } else if err != nil {
- return nil, err
- }
- // Append all frames to the returned list
- frameList = append(frameList, frame)
- }
- return frameList, nil
-}
-
-// WriteFrameList is a convenience method that writes a set of frames to a FrameWriter
-func WriteFrameList(fw FrameWriter, frameList FrameList) error {
- // TODO: Create an unit test for this function
- // Loop all frames in the list, and write them individually to the FrameWriter
- for _, frame := range frameList {
- if _, err := fw.Write(frame); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/pkg/serializer/frame_writer.go b/pkg/serializer/frame_writer.go
deleted file mode 100644
index d2f0fc45..00000000
--- a/pkg/serializer/frame_writer.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package serializer
-
-import (
- "io"
-)
-
-const (
- yamlSeparator = "---\n"
-)
-
-// Writer in this package is an alias for io.Writer. It helps in Godoc to locate
-// helpers in this package which returns writers (i.e. ToBytes)
-type Writer io.Writer
-
-// FrameWriter is a ContentType-specific io.Writer that writes given frames in an applicable way
-// to an underlying io.Writer stream
-type FrameWriter interface {
- ContentTyped
- Writer
-}
-
-// NewFrameWriter returns a new FrameWriter for the given Writer and ContentType
-func NewFrameWriter(contentType ContentType, w Writer) FrameWriter {
- switch contentType {
- case ContentTypeYAML:
- // Use our own implementation of the underlying YAML FrameWriter
- return &frameWriter{newYAMLWriter(w), contentType}
- case ContentTypeJSON:
- // Comment from k8s.io/apimachinery/pkg/runtime/serializer/json.Framer.NewFrameWriter:
- // "we can write JSON objects directly to the writer, because they are self-framing"
- // Hence, we directly use w without any modifications.
- return &frameWriter{w, contentType}
- default:
- return &errFrameWriter{ErrUnsupportedContentType, contentType}
- }
-}
-
-// NewYAMLFrameWriter returns a FrameWriter that writes YAML frames separated by "---\n"
-//
-// This call is the same as NewFrameWriter(ContentTypeYAML, w)
-func NewYAMLFrameWriter(w Writer) FrameWriter {
- return NewFrameWriter(ContentTypeYAML, w)
-}
-
-// NewJSONFrameWriter returns a FrameWriter that writes JSON frames without separation
-// (i.e. "{ ... }{ ... }{ ... }" on the wire)
-//
-// This call is the same as NewFrameWriter(ContentTypeYAML, w)
-func NewJSONFrameWriter(w Writer) FrameWriter {
- return NewFrameWriter(ContentTypeJSON, w)
-}
-
-// frameWriter is an implementation of the FrameWriter interface
-type frameWriter struct {
- Writer
-
- contentType ContentType
-
- // TODO: Maybe add mutexes for thread-safety (so no two goroutines write at the same time)
-}
-
-// ContentType returns the content type for the given FrameWriter
-func (wf *frameWriter) ContentType() ContentType {
- return wf.contentType
-}
-
-// newYAMLWriter returns a new yamlWriter implementation
-func newYAMLWriter(w Writer) *yamlWriter {
- return &yamlWriter{
- w: w,
- hasWritten: false,
- }
-}
-
-// yamlWriter writes yamlSeparator between documents
-type yamlWriter struct {
- w io.Writer
- hasWritten bool
-}
-
-// Write implements io.Writer
-func (w *yamlWriter) Write(p []byte) (n int, err error) {
- // If we've already written some documents, add the separator in between
- if w.hasWritten {
- _, err = w.w.Write([]byte(yamlSeparator))
- if err != nil {
- return
- }
- }
-
- // Write the given bytes to the underlying writer
- n, err = w.w.Write(p)
- if err != nil {
- return
- }
-
- // Mark that we've now written once and should write the separator in between
- w.hasWritten = true
- return
-}
-
-// ToBytes returns a Writer which can be passed to NewFrameWriter. The Writer writes directly
-// to an underlying byte array. The byte array must be of enough length in order to write.
-func ToBytes(p []byte) Writer {
- return &byteWriter{p, 0}
-}
-
-type byteWriter struct {
- to []byte
- // the next index to write to
- index int
-}
-
-func (w *byteWriter) Write(from []byte) (n int, err error) {
- // Check if we have space in to, in order to write bytes there
- if w.index+len(from) > len(w.to) {
- err = io.ErrShortBuffer
- return
- }
- // Copy over the bytes one by one
- for i := range from {
- w.to[w.index+i] = from[i]
- }
- // Increase the index for the next Write call's target position
- w.index += len(from)
- n += len(from)
- return
-}
diff --git a/pkg/serializer/frame_writer_test.go b/pkg/serializer/frame_writer_test.go
deleted file mode 100644
index 988dacbc..00000000
--- a/pkg/serializer/frame_writer_test.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package serializer
-
-import (
- "bytes"
- "testing"
-)
-
-func Test_byteWriter_Write(t *testing.T) {
- type fields struct {
- to []byte
- index int
- }
- type args struct {
- from []byte
- }
- tests := []struct {
- name string
- fields fields
- args args
- wantN int
- wantErr bool
- }{
- {
- name: "simple case",
- fields: fields{
- to: make([]byte, 50),
- },
- args: args{
- from: []byte("Hello!\nFoobar"),
- },
- wantN: 13,
- wantErr: false,
- },
- {
- name: "target too short",
- fields: fields{
- to: make([]byte, 10),
- },
- args: args{
- from: []byte("Hello!\nFoobar"),
- },
- wantN: 0,
- wantErr: true,
- },
- }
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- w := &byteWriter{
- to: tt.fields.to,
- index: tt.fields.index,
- }
- gotN, err := w.Write(tt.args.from)
- if (err != nil) != tt.wantErr {
- t.Errorf("byteWriter.Write() error = %v, wantErr %v", err, tt.wantErr)
- return
- }
- if gotN != tt.wantN {
- t.Errorf("byteWriter.Write() = %v, want %v", gotN, tt.wantN)
- return
- }
- if !tt.wantErr && !bytes.Equal(tt.fields.to[:gotN], tt.args.from) {
- t.Errorf("byteWriter.Write(): expected fields.to (%s) to equal args.from (%s), but didn't", tt.fields.to[:gotN], tt.args.from)
- }
- })
- }
-}
diff --git a/pkg/serializer/options.go b/pkg/serializer/options.go
new file mode 100644
index 00000000..e4d8fe4a
--- /dev/null
+++ b/pkg/serializer/options.go
@@ -0,0 +1,256 @@
+package serializer
+
+import (
+ "k8s.io/utils/pointer"
+)
+
+type EncodeOption interface {
+ ApplyToEncode(*EncodeOptions)
+}
+
+func defaultEncodeOpts() *EncodeOptions {
+ return &EncodeOptions{
+ // Default to "pretty encoding"
+ JSONIndent: pointer.Int32Ptr(2),
+ PreserveComments: PreserveCommentsDisable,
+ }
+}
+
+type EncodeOptions struct {
+ // Indent JSON encoding output with this many spaces.
+ // Set this to 0 (or use PrettyEncode(false) or JSONIndent(0)) to disable pretty output.
+ // Only applicable to ContentTypeJSON framers.
+ //
+ // Default: 2, i.e. pretty output
+ // TODO: Make this a property of the FrameWriter instead?
+ JSONIndent *int32
+
+ // Whether to preserve YAML comments internally.
+ // This only works for objects embedding metav1.ObjectMeta.
+ //
+ // Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored.
+ //
+ // Usage of this option also requires setting the PreserveComments in DecodeOptions, too.
+ //
+ // Default: PreserveCommentsDisable
+ PreserveComments PreserveComments
+
+ // TODO: Maybe consider an option to always convert to the preferred version (not just internal)
+}
+
+var _ EncodeOption = &EncodeOptions{}
+
+func (o *EncodeOptions) ApplyToEncode(target *EncodeOptions) {
+ if o.JSONIndent != nil {
+ target.JSONIndent = o.JSONIndent
+ }
+ if o.PreserveComments != 0 {
+ target.PreserveComments = o.PreserveComments
+ }
+}
+
+func (o *EncodeOptions) ApplyOptions(opts []EncodeOption) *EncodeOptions {
+ for _, opt := range opts {
+ opt.ApplyToEncode(o)
+ }
+ // All fields are guaranteed to be set after this, as defaultEncodeOpts() populates every one of them
+ return o
+}
+
+// Whether to preserve YAML comments internally.
+// This only works for objects embedding metav1.ObjectMeta.
+//
+// Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored.
+// TODO: Add a BestEffort mode
+type PreserveComments int
+
+const (
+ // PreserveCommentsDisable means do not try to preserve comments
+ PreserveCommentsDisable PreserveComments = 1 + iota
+ // PreserveCommentsStrict means try to preserve comments, and fail if it does not work
+ PreserveCommentsStrict
+)
+
+var _ EncodeOption = PreserveComments(0)
+var _ DecodeOption = PreserveComments(0)
+
+func (p PreserveComments) ApplyToEncode(target *EncodeOptions) {
+ // TODO: Validate?
+ target.PreserveComments = p
+}
+
+func (p PreserveComments) ApplyToDecode(target *DecodeOptions) {
+ // TODO: Validate?
+ target.PreserveComments = p
+}
+
+// Indent JSON encoding output with this many spaces.
+// Use PrettyEncode(false) or JSONIndent(0) to disable pretty output.
+// Only applicable to ContentTypeJSON framers.
+type JSONIndent int32
+
+var _ EncodeOption = JSONIndent(0)
+
+func (i JSONIndent) ApplyToEncode(target *EncodeOptions) {
+ target.JSONIndent = pointer.Int32Ptr(int32(i))
+}
+
+// Shorthand for JSONIndent(0) if false, or JSONIndent(2) if true
+type PrettyEncode bool
+
+var _ EncodeOption = PrettyEncode(false)
+
+func (pretty PrettyEncode) ApplyToEncode(target *EncodeOptions) {
+ if pretty {
+ JSONIndent(2).ApplyToEncode(target)
+ } else {
+ JSONIndent(0).ApplyToEncode(target)
+ }
+}
+
+// DECODING
+
+type DecodeOption interface {
+ ApplyToDecode(*DecodeOptions)
+}
+
+func defaultDecodeOpts() *DecodeOptions {
+ return &DecodeOptions{
+ ConvertToHub: pointer.BoolPtr(false),
+ Strict: pointer.BoolPtr(true),
+ Default: pointer.BoolPtr(false),
+ DecodeListElements: pointer.BoolPtr(true),
+ PreserveComments: PreserveCommentsDisable,
+ DecodeUnknown: pointer.BoolPtr(false),
+ }
+}
+
+type DecodeOptions struct {
+ // Not applicable for Decoder.DecodeInto(). If true, the decoded external object
+ // will be converted into its hub (or internal, where applicable) representation.
+ // Otherwise, the decoded object will be left in its external representation.
+ //
+ // Default: false
+ ConvertToHub *bool
+
+ // Parse the YAML/JSON in strict mode, returning a specific error if the input
+ // contains duplicate or unknown fields or formatting errors.
+ //
+ // Default: true
+ Strict *bool
+
+ // Automatically default the decoded object.
+ // Default: false
+ Default *bool
+
+ // Only applicable for Decoder.DecodeAll(). If the underlying data contains a v1.List,
+ // the items of the list will be traversed, decoded into their respective types, and
+ // appended to the returned slice. The v1.List will in this case not be returned.
+ // This conversion does NOT support preserving comments. If the given scheme doesn't
+ // recognize the v1.List, it will be registered automatically before use.
+ //
+ // Default: true
+ DecodeListElements *bool
+
+ // Whether to preserve YAML comments internally.
+ // This only works for objects embedding metav1.ObjectMeta.
+ //
+ // Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored.
+ //
+ // Usage of this option also requires setting the PreserveComments in EncodeOptions, too.
+ //
+ // Default: PreserveCommentsDisable
+ PreserveComments PreserveComments
+
+ // DecodeUnknown specifies whether to decode objects with an unknown GroupVersionKind into a
+ // *runtime.Unknown object when running Decode(All) (true value), or to return an error when
+ // any unrecognized type is found (false value).
+ //
+ // Default: false
+ DecodeUnknown *bool
+}
+
+var _ DecodeOption = &DecodeOptions{}
+
+func (o *DecodeOptions) ApplyToDecode(target *DecodeOptions) {
+ if o.ConvertToHub != nil {
+ target.ConvertToHub = o.ConvertToHub
+ }
+ if o.Strict != nil {
+ target.Strict = o.Strict
+ }
+ if o.Default != nil {
+ target.Default = o.Default
+ }
+ if o.DecodeListElements != nil {
+ target.DecodeListElements = o.DecodeListElements
+ }
+ if o.PreserveComments != 0 {
+ target.PreserveComments = o.PreserveComments
+ }
+ if o.DecodeUnknown != nil {
+ target.DecodeUnknown = o.DecodeUnknown
+ }
+}
+
+func (o *DecodeOptions) ApplyOptions(opts []DecodeOption) *DecodeOptions {
+ for _, opt := range opts {
+ opt.ApplyToDecode(o)
+ }
+ // All fields are guaranteed to be set after this, as defaultDecodeOpts() populates every one of them
+ return o
+}
+
+// Not applicable for Decoder.DecodeInto(). If true, the decoded external object
+// will be converted into its hub (or internal, where applicable) representation.
+// Otherwise, the decoded object will be left in its external representation.
+type ConvertToHub bool
+
+var _ DecodeOption = ConvertToHub(false)
+
+func (b ConvertToHub) ApplyToDecode(target *DecodeOptions) {
+ target.ConvertToHub = pointer.BoolPtr(bool(b))
+}
+
+// Parse the YAML/JSON in strict mode, returning a specific error if the input
+// contains duplicate or unknown fields or formatting errors.
+type DecodeStrict bool
+
+var _ DecodeOption = DecodeStrict(false)
+
+func (b DecodeStrict) ApplyToDecode(target *DecodeOptions) {
+ target.Strict = pointer.BoolPtr(bool(b))
+}
+
+// Automatically default the decoded object.
+type DefaultAtDecode bool
+
+var _ DecodeOption = DefaultAtDecode(false)
+
+func (b DefaultAtDecode) ApplyToDecode(target *DecodeOptions) {
+ target.Default = pointer.BoolPtr(bool(b))
+}
+
+// Only applicable for Decoder.DecodeAll(). If the underlying data contains a v1.List,
+// the items of the list will be traversed, decoded into their respective types, and
+// appended to the returned slice. The v1.List will in this case not be returned.
+// This conversion does NOT support preserving comments. If the given scheme doesn't
+// recognize the v1.List, it will be registered automatically before use.
+type DecodeListElements bool
+
+var _ DecodeOption = DecodeListElements(false)
+
+func (b DecodeListElements) ApplyToDecode(target *DecodeOptions) {
+ target.DecodeListElements = pointer.BoolPtr(bool(b))
+}
+
+// DecodeUnknown specifies whether to decode objects with an unknown GroupVersionKind into a
+// *runtime.Unknown object when running Decode(All) (true value), or to return an error when
+// any unrecognized type is found (false value).
+type DecodeUnknown bool
+
+var _ DecodeOption = DecodeUnknown(false)
+
+func (b DecodeUnknown) ApplyToDecode(target *DecodeOptions) {
+ target.DecodeUnknown = pointer.BoolPtr(bool(b))
+}
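The option types above follow the apply-pattern popularized by controller-runtime: defaults are constructed first, then each option mutates the options struct in order, so later options win. A trimmed-down, runnable sketch of that merge order (the types here are simplified copies, not the real ones):

```go
package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

// Simplified copies of the option machinery, just enough to show merge order.
type EncodeOptions struct{ JSONIndent *int32 }

type EncodeOption interface{ ApplyToEncode(*EncodeOptions) }

type JSONIndent int32

func (i JSONIndent) ApplyToEncode(o *EncodeOptions) { o.JSONIndent = pointer.Int32Ptr(int32(i)) }

func defaultOpts() *EncodeOptions { return &EncodeOptions{JSONIndent: pointer.Int32Ptr(2)} }

func main() {
	opts := defaultOpts()
	for _, o := range []EncodeOption{JSONIndent(0), JSONIndent(4)} {
		o.ApplyToEncode(opts) // options are applied in order, so later ones win
	}
	fmt.Println(*opts.JSONIndent) // prints 4
}
```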
diff --git a/pkg/serializer/patch.go b/pkg/serializer/patch.go
new file mode 100644
index 00000000..c2b17379
--- /dev/null
+++ b/pkg/serializer/patch.go
@@ -0,0 +1,144 @@
+package serializer
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/frame"
+ "github.com/weaveworks/libgitops/pkg/util/patch"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/util/strategicpatch"
+ openapi "k8s.io/kube-openapi/pkg/util/proto"
+)
+
+// TODO: Move pkg/util/patch under pkg/serializer?
+
+type Patcher interface {
+ // ApplyOnStruct applies the given patch (JSON-encoded) using the given BytePatcher
+ // (that knows how to operate on that kind of patch type) into obj.
+ //
+ // obj MUST be a typed object. Unversioned, partial or unstructured objects are not
+ // supported. For those use-cases, convert your object into an unstructured one, and
+ // pass it to ApplyOnUnstructured.
+ //
+ // obj MUST NOT be an internal type. If you operate on an internal object as your "hub",
+ // convert the object yourself first to the GroupVersion of the patch bytes, and then
+ // convert back after this call.
+ //
+ // In case the patch would require knowledge about the schema (e.g. StrategicMergePatch),
+// this function looks that metadata up using reflection on obj.
+ ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Object) error
+
+ // ApplyOnUnstructured applies the given patch (JSON-encoded) using the given BytePatcher
+ // (that knows how to operate on that kind of patch type) into the unstructured obj.
+ //
+ // If knowledge about the schema is required by the patch type (e.g. StrategicMergePatch),
+// it is the caller's responsibility to provide an OpenAPI schema.
+ ApplyOnUnstructured(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Unstructured, schema openapi.Schema) error
+
+ // Encoder gets the underlying Encoder
+ Encoder() Encoder
+
+ // Decoder gets the underlying Decoder
+ Decoder() Decoder
+}
+
+func NewPatcher(encoder Encoder, decoder Decoder) Patcher {
+ // It shouldn't matter if we use the LockedScheme from the encoder or decoder
+ // TODO: Does this work with pretty encoders?
+ return &patcher{encoder.GetLockedScheme(), encoder, decoder}
+}
+
+type patcher struct {
+ LockedScheme
+ encoder Encoder
+ decoder Decoder
+}
+
+func (p *patcher) Encoder() Encoder {
+ return p.encoder
+}
+
+func (p *patcher) Decoder() Decoder {
+ return p.decoder
+}
+
+// ApplyOnStruct applies the given patch (JSON-encoded) using the given BytePatcher
+// (that knows how to operate on that kind of patch type) into obj.
+//
+// obj MUST be a typed object. Unversioned, partial or unstructured objects are not
+// supported. For those use-cases, convert your object into an unstructured one, and
+// pass it to ApplyOnUnstructured.
+//
+// obj MUST NOT be an internal type. If you operate on an internal object as your "hub",
+// convert the object yourself first to the GroupVersion of the patch bytes, and then
+// convert back after this call.
+//
+// In case the patch would require knowledge about the schema (e.g. StrategicMergePatch),
+// this function looks that metadata up using reflection on obj.
+func (p *patcher) ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Object) error {
+ // Require that obj is typed
+ if !IsTyped(obj, p.Scheme()) {
+ return errors.New("obj must be typed")
+ }
+ // Get the GVK so we can check if obj is internal
+ gvk, err := GVKForObject(p.Scheme(), obj)
+ if err != nil {
+ return err
+ }
+ // It must not be internal, as we will encode it soon.
+ if gvk.Version == runtime.APIVersionInternal {
+ return errors.New("obj must not be internal")
+ }
+
+ // Encode without conversion to the buffer
+ var buf bytes.Buffer
+ if err := p.encoder.EncodeForGroupVersion(frame.NewJSONWriter(content.ToBuffer(&buf)), obj, gvk.GroupVersion()); err != nil {
+ return err
+ }
+
+ // Get the schema in case needed by the BytePatcher
+ schema, err := strategicpatch.NewPatchMetaFromStruct(obj)
+ if err != nil {
+ return err
+ }
+
+ // Apply the patch, and get the new JSON out
+ newJSON, err := bytePatcher.Apply(buf.Bytes(), patch, schema)
+ if err != nil {
+ return err
+ }
+
+ // Decode into the object to apply the changes
+ fr := frame.NewSingleJSONReader(content.FromBytes(newJSON))
+ if err := p.decoder.DecodeInto(fr, obj); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (p *patcher) ApplyOnUnstructured(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Unstructured, schema openapi.Schema) error {
+ // Marshal the object to form the source JSON
+ sourceJSON, err := json.Marshal(obj)
+ if err != nil {
+ return err
+ }
+
+ // Conditionally get the schema from the provided OpenAPI spec
+ var patchMeta strategicpatch.LookupPatchMeta
+ if schema != nil {
+ patchMeta = strategicpatch.NewPatchMetaFromOpenAPI(schema)
+ }
+
+ // Apply the patch, and get the new JSON out
+ newJSON, err := bytePatcher.Apply(sourceJSON, patch, patchMeta)
+ if err != nil {
+ return err
+ }
+
+ // Decode back into obj
+ return json.Unmarshal(newJSON, obj)
+}
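The patch.BytePatcher contract is visible here only through its Apply(original, patch, schema) call. As one hedged example of what such an implementation could look like, the self-contained sketch below applies an RFC 7386 JSON merge patch, a flavor that needs no schema (a strategic-merge implementation would instead use the LookupPatchMeta passed above):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// mergeJSON applies an RFC 7386 JSON merge patch: objects merge recursively,
// a null value deletes the key, and any other value replaces the original.
func mergeJSON(original, patch interface{}) interface{} {
	patchMap, ok := patch.(map[string]interface{})
	if !ok {
		return patch // non-object patches replace wholesale
	}
	origMap, ok := original.(map[string]interface{})
	if !ok {
		origMap = map[string]interface{}{}
	}
	for k, v := range patchMap {
		if v == nil {
			delete(origMap, k)
			continue
		}
		origMap[k] = mergeJSON(origMap[k], v)
	}
	return origMap
}

func main() {
	var doc, patch interface{}
	_ = json.Unmarshal([]byte(`{"spec":{"replicas":1,"paused":true}}`), &doc)
	_ = json.Unmarshal([]byte(`{"spec":{"replicas":3,"paused":null}}`), &patch)
	out, _ := json.Marshal(mergeJSON(doc, patch))
	fmt.Println(string(out)) // {"spec":{"replicas":3}}
}
```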
diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go
index eb798c91..c1a4ca8f 100644
--- a/pkg/serializer/serializer.go
+++ b/pkg/serializer/serializer.go
@@ -2,13 +2,14 @@ package serializer
import (
"errors"
- "fmt"
+ "github.com/weaveworks/libgitops/pkg/frame"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
k8sserializer "k8s.io/apimachinery/pkg/runtime/serializer"
)
+/*
// ContentType specifies a content type for Encoders, Decoders, FrameWriters and FrameReaders
type ContentType string
@@ -20,17 +21,24 @@ const (
// ContentTypeYAML specifies usage of YAML as the content type.
// It is an alias for k8s.io/apimachinery/pkg/runtime.ContentTypeYAML
ContentTypeYAML = ContentType(runtime.ContentTypeYAML)
-)
+)*/
-// ErrUnsupportedContentType is returned if the specified content type isn't supported
-var ErrUnsupportedContentType = errors.New("unsupported content type")
+var (
+ // ErrUnsupportedContentType is returned if the specified content type isn't supported
+ //ErrUnsupportedContentType = errors.New("unsupported content type")
+ // ErrObjectIsNotList is returned when a runtime.Object is not a List type
+ ErrObjectIsNotList = errors.New("given runtime.Object is not a *List type, or does not implement metav1.ListInterface")
+)
+/*
// ContentTyped is an interface for objects that are specific to a set ContentType.
type ContentTyped interface {
// ContentType returns the ContentType (usually ContentTypeYAML or ContentTypeJSON) for the given object.
ContentType() ContentType
}
+func (ct ContentType) ContentType() ContentType { return ct }
+*/
// Serializer is an interface providing high-level decoding/encoding functionality
// for types registered in a *runtime.Scheme
type Serializer interface {
@@ -38,13 +46,13 @@ type Serializer interface {
// a FrameWriter. The decoder can be customized by passing some options (e.g. WithDecodingOptions)
// to this call.
// The decoder supports both "classic" API Machinery objects and controller-runtime CRDs
- Decoder(optsFn ...DecodingOptionsFunc) Decoder
+ Decoder(optsFn ...DecodeOption) Decoder
// Encoder is a high-level interface for encoding Kubernetes API Machinery objects and writing them
// to a FrameWriter. The encoder can be customized by passing some options (e.g. WithEncodingOptions)
// to this call.
// The encoder supports both "classic" API Machinery objects and controller-runtime CRDs
- Encoder(optsFn ...EncodingOptionsFunc) Encoder
+ Encoder(optsFn ...EncodeOption) Encoder
// Converter is a high-level interface for converting objects between different versions
// The converter supports both "classic" API Machinery objects and controller-runtime CRDs
@@ -53,18 +61,16 @@ type Serializer interface {
// Defaulter is a high-level interface for accessing defaulting functions in a scheme
Defaulter() Defaulter
- // Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to
- // the "type universe" and advanced conversion/defaulting features
- Scheme() *runtime.Scheme
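+ // Patcher is a high-level interface for applying patches to objects,
+ // built on top of this Serializer's encoder and decoder.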
+ Patcher() Patcher
- // Codecs provides access to the underlying serializer.CodecFactory, may be used if low-level access
- // is needed for encoding and decoding
- Codecs() *k8sserializer.CodecFactory
-}
+ // GetLockedScheme exposes the underlying LockedScheme.
+ // Its Scheme provides access to the underlying runtime.Scheme, and may be used for low-level
+ // access to the "type universe" and advanced conversion/defaulting features.
+ GetLockedScheme() LockedScheme
-type schemeAndCodec struct {
- scheme *runtime.Scheme
- codecs *k8sserializer.CodecFactory
+ // CodecFactory provides access to the underlying CodecFactory, and may be used if low-level
+ // access is needed for encoding and decoding.
+ CodecFactory() *k8sserializer.CodecFactory
}
// Encoder is a high-level interface for encoding Kubernetes API Machinery objects and writing them
@@ -74,12 +80,18 @@ type Encoder interface {
// The FrameWriter specifies the ContentType. This encoder will automatically convert any
// internal object given to the preferred external groupversion. No conversion will happen
// if the given object is of an external version.
- Encode(fw FrameWriter, obj ...runtime.Object) error
+ Encode(fw frame.Writer, obj ...runtime.Object) error
// EncodeForGroupVersion encodes the given object for the specific groupversion. If the object
// is not of that version currently it will try to convert. The output bytes are written to the
// FrameWriter. The FrameWriter specifies the ContentType.
- EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv schema.GroupVersion) error
+ EncodeForGroupVersion(fw frame.Writer, obj runtime.Object, gv schema.GroupVersion) error
+
+ // GetLockedScheme exposes the underlying LockedScheme
+ GetLockedScheme() LockedScheme
+
+ // CodecFactory exposes the underlying CodecFactory
+ CodecFactory() *k8sserializer.CodecFactory
}
// Decoder is a high-level interface for decoding Kubernetes API Machinery objects read from
@@ -101,7 +113,7 @@ type Decoder interface {
// If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a
// *runtime.Unknown object instead of returning a UnrecognizedTypeError.
// opts.DecodeListElements is not applicable in this call.
- Decode(fr FrameReader) (runtime.Object, error)
+ Decode(fr frame.Reader) (runtime.Object, error)
// DecodeInto decodes the next document in the FrameReader stream into obj if the types are matching.
// If there are multiple documents in the underlying stream, this call will read one
@@ -120,7 +132,7 @@ type Decoder interface {
// opts.DecodeUnknown is not applicable in this call. In case you want to decode an object into a
// *runtime.Unknown, just create a runtime.Unknown object and pass the pointer as obj into DecodeInto
// and it'll work.
- DecodeInto(fr FrameReader, obj runtime.Object) error
+ DecodeInto(fr frame.Reader, obj runtime.Object) error
// DecodeAll returns the decoded objects from all documents in the FrameReader stream. The underlying
// stream is automatically closed on io.EOF. io.EOF is never returned from this function.
@@ -137,7 +149,10 @@ type Decoder interface {
// added into the returning slice. The v1.List will in this case not be returned.
// If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a
// *runtime.Unknown object instead of returning a UnrecognizedTypeError.
- DecodeAll(fr FrameReader) ([]runtime.Object, error)
+ DecodeAll(fr frame.Reader) ([]runtime.Object, error)
+
+ // GetLockedScheme exposes the underlying LockedScheme
+ GetLockedScheme() LockedScheme
}
// Converter is an interface that allows access to object conversion capabilities
@@ -157,6 +172,9 @@ type Converter interface {
// or the sigs.k8s.io/controller-runtime/pkg/conversion.Hub for the given conversion.Convertible object in
// the "in" argument. No defaulting is performed.
ConvertToHub(in runtime.Object) (runtime.Object, error)
+
+ // GetLockedScheme exposes the underlying LockedScheme
+ GetLockedScheme() LockedScheme
}
// Defaulter is a high-level interface for accessing defaulting functions in a scheme
@@ -172,6 +190,9 @@ type Defaulter interface {
// scheme.Default(obj), but with extra logic to cover also internal versions.
// Important to note here is that the TypeMeta information is NOT applied automatically.
NewDefaultedObject(gvk schema.GroupVersionKind) (runtime.Object, error)
+
+ // GetLockedScheme exposes the underlying LockedScheme
+ GetLockedScheme() LockedScheme
}
// NewSerializer constructs a new serializer based on a scheme, and optionally a codecfactory
@@ -186,43 +207,43 @@ func NewSerializer(scheme *runtime.Scheme, codecs *k8sserializer.CodecFactory) S
*codecs = k8sserializer.NewCodecFactory(scheme)
}
+ schemeLock := newLockedScheme(scheme)
+
return &serializer{
- schemeAndCodec: &schemeAndCodec{
- scheme: scheme,
- codecs: codecs,
- },
- converter: newConverter(scheme),
- defaulter: newDefaulter(scheme),
+ LockedScheme: schemeLock,
+ codecs: codecs,
+ converter: NewConverter(schemeLock),
+ defaulter: NewDefaulter(schemeLock),
+ patcher: NewPatcher(
+ NewEncoder(schemeLock, codecs, PrettyEncode(true)),
+ NewDecoder(schemeLock),
+ ),
}
}
// serializer implements the Serializer interface
type serializer struct {
- *schemeAndCodec
+ LockedScheme
+ codecs *k8sserializer.CodecFactory
converter *converter
- defaulter *defaulter
+ defaulter Defaulter
+ patcher Patcher
}
-// Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to
-// the "type universe" and advanced conversion/defaulting features
-func (s *serializer) Scheme() *runtime.Scheme {
- return s.scheme
+func (s *serializer) GetLockedScheme() LockedScheme {
+ return s.LockedScheme
}
-// Codecs provides access to the underlying serializer.CodecFactory, may be used if low-level access
-// is needed for encoding and decoding
-func (s *serializer) Codecs() *k8sserializer.CodecFactory {
+func (s *serializer) CodecFactory() *k8sserializer.CodecFactory {
return s.codecs
}
-func (s *serializer) Decoder(optFns ...DecodingOptionsFunc) Decoder {
- opts := newDecodeOpts(optFns...)
- return newDecoder(s.schemeAndCodec, *opts)
+func (s *serializer) Decoder(opts ...DecodeOption) Decoder {
+ return NewDecoder(s.LockedScheme, opts...)
}
-func (s *serializer) Encoder(optFns ...EncodingOptionsFunc) Encoder {
- opts := newEncodeOpts(optFns...)
- return newEncoder(s.schemeAndCodec, *opts)
+func (s *serializer) Encoder(opts ...EncodeOption) Encoder {
+ return NewEncoder(s.LockedScheme, s.codecs, opts...)
}
func (s *serializer) Converter() Converter {
@@ -233,32 +254,6 @@ func (s *serializer) Defaulter() Defaulter {
return s.defaulter
}
-func prioritizedVersionForGroup(scheme *runtime.Scheme, groupName string) (schema.GroupVersion, error) {
- // Get the prioritized versions for the given group
- gvs := scheme.PrioritizedVersionsForGroup(groupName)
- if len(gvs) < 1 {
- return schema.GroupVersion{}, fmt.Errorf("expected some version to be registered for group %s", groupName)
- }
- // Use the first, preferred, (external) version
- return gvs[0], nil
-}
-
-func GVKForObject(scheme *runtime.Scheme, obj runtime.Object) (schema.GroupVersionKind, error) {
- // If we already have TypeMeta filled in here, just use it
- // TODO: This is probably not needed
- gvk := obj.GetObjectKind().GroupVersionKind()
- if !gvk.Empty() {
- return gvk, nil
- }
-
- // TODO: If there are two GVKs returned, it's probably a misconfiguration in the scheme
- // It might be expected though, and we can tolerate setting the GVK manually IFF there are more than
- // one ObjectKind AND the given GVK is one of them.
-
- // Get the possible kinds for the object
- gvks, unversioned, err := scheme.ObjectKinds(obj)
- if unversioned || err != nil || len(gvks) != 1 {
- return schema.GroupVersionKind{}, fmt.Errorf("unversioned %t or err %v or invalid gvks %v", unversioned, err, gvks)
- }
- return gvks[0], nil
+func (s *serializer) Patcher() Patcher {
+ return s.patcher
}
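Pieced together from the call sites elsewhere in this diff, a typical round-trip through the reworked Serializer could look like the sketch below. The pkg/content and pkg/frame APIs are inferred from those call sites, so treat this as an approximation rather than documented usage.

```go
package example

import (
	"bytes"

	"github.com/weaveworks/libgitops/pkg/content"
	"github.com/weaveworks/libgitops/pkg/frame"
	"github.com/weaveworks/libgitops/pkg/serializer"
	"k8s.io/apimachinery/pkg/runtime"
	k8sserializer "k8s.io/apimachinery/pkg/runtime/serializer"
)

// RoundtripYAMLToJSON decodes one YAML document into its hub representation
// and re-encodes it as indented JSON, using only calls seen in this diff.
func RoundtripYAMLToJSON(scheme *runtime.Scheme, yamlBytes []byte) ([]byte, error) {
	codecs := k8sserializer.NewCodecFactory(scheme)
	s := serializer.NewSerializer(scheme, &codecs)

	obj, err := s.Decoder(serializer.ConvertToHub(true)).
		Decode(frame.NewYAMLReader(content.FromBytes(yamlBytes)))
	if err != nil {
		return nil, err
	}

	var buf bytes.Buffer
	err = s.Encoder(serializer.JSONIndent(2)).
		Encode(frame.NewJSONWriter(content.ToBuffer(&buf)), obj)
	if err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}
```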
diff --git a/pkg/serializer/serializer_test.go b/pkg/serializer/serializer_test.go
index ba239855..260e6e16 100644
--- a/pkg/serializer/serializer_test.go
+++ b/pkg/serializer/serializer_test.go
@@ -7,6 +7,9 @@ import (
"strings"
"testing"
+ "github.com/stretchr/testify/assert"
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/frame"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
@@ -21,8 +24,8 @@ var (
codecs = k8sserializer.NewCodecFactory(scheme)
ourserializer = NewSerializer(scheme, &codecs)
defaultEncoder = ourserializer.Encoder(
- WithPrettyEncode(false), // TODO: Also test the pretty serializer
- WithCommentsEncode(true),
+ PrettyEncode(false), // TODO: Also test the pretty serializer
+ PreserveCommentsStrict,
)
groupname = "foogroup"
@@ -30,9 +33,10 @@ var (
ext1gv = schema.GroupVersion{Group: groupname, Version: "v1alpha1"}
ext2gv = schema.GroupVersion{Group: groupname, Version: "v1alpha2"}
- intsb = runtime.NewSchemeBuilder(addInternalTypes)
- ext1sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext1gv), v1_addDefaultingFuncs, registerOldCRD)
- ext2sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext2gv), v2_addDefaultingFuncs, registerNewCRD)
+ intsb = runtime.NewSchemeBuilder(addInternalTypes)
+ ext1sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext1gv), v1_addDefaultingFuncs, registerOldCRD)
+ ext2sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext2gv), v2_addDefaultingFuncs, registerNewCRD)
+ yamlSep = []byte("---\n")
)
func v1_addDefaultingFuncs(scheme *runtime.Scheme) error {
@@ -251,38 +255,45 @@ var (
newCRDMeta = metav1.TypeMeta{APIVersion: "foogroup/v1alpha2", Kind: "CRD"}
unknownMeta = runtime.TypeMeta{APIVersion: "unknown/v1", Kind: "YouDontRecognizeMe"}
- oneSimple = []byte(`apiVersion: foogroup/v1alpha1
+ oneSimple = []byte(`---
+apiVersion: foogroup/v1alpha1
kind: Simple
testString: foo
`)
- simpleUnknownField = []byte(`apiVersion: foogroup/v1alpha1
+ simpleUnknownField = []byte(`---
+apiVersion: foogroup/v1alpha1
kind: Simple
testString: foo
unknownField: bar
`)
- simpleDuplicateField = []byte(`apiVersion: foogroup/v1alpha1
+ simpleDuplicateField = []byte(`---
+apiVersion: foogroup/v1alpha1
kind: Simple
testString: foo
testString: bar
`)
- unrecognizedVersion = []byte(`apiVersion: foogroup/v1alpha0
+ unrecognizedVersion = []byte(`---
+apiVersion: foogroup/v1alpha0
kind: Simple
testString: foo
`)
- unrecognizedGVK = []byte(`apiVersion: unknown/v1
+ unrecognizedGVK = []byte(`---
+apiVersion: unknown/v1
kind: YouDontRecognizeMe
testFooBar: true
`)
- oneComplex = []byte(`Int64: 0
+ oneComplex = []byte(`---
+Int64: 0
apiVersion: foogroup/v1alpha1
bool: false
int: 0
kind: Complex
string: bar
`)
- simpleAndComplex = []byte(string(oneSimple) + "---\n" + string(oneComplex))
+ simpleAndComplex = []byte(string(oneSimple) + string(oneComplex))
- testList = []byte(`apiVersion: v1
+ testList = []byte(`---
+apiVersion: v1
kind: List
items:
- apiVersion: foogroup/v1alpha1
@@ -303,7 +314,8 @@ items:
complexJSON = []byte(`{"apiVersion":"foogroup/v1alpha1","kind":"Complex","string":"bar","int":0,"Int64":0,"bool":false}
`)
- oldCRD = []byte(`# I'm a top comment
+ oldCRD = []byte(`---
+# I'm a top comment
apiVersion: foogroup/v1alpha1
kind: CRD
metadata:
@@ -312,14 +324,16 @@ metadata:
testString: foobar # Me too
`)
- oldCRDNoComments = []byte(`apiVersion: foogroup/v1alpha1
+ oldCRDNoComments = []byte(`---
+apiVersion: foogroup/v1alpha1
kind: CRD
metadata:
creationTimestamp: null
testString: foobar
`)
- newCRD = []byte(`# I'm a top comment
+ newCRD = []byte(`---
+# I'm a top comment
apiVersion: foogroup/v1alpha2
kind: CRD
metadata:
@@ -328,7 +342,8 @@ metadata:
otherString: foobar # Me too
`)
- newCRDNoComments = []byte(`apiVersion: foogroup/v1alpha2
+ newCRDNoComments = []byte(`---
+apiVersion: foogroup/v1alpha2
kind: CRD
metadata:
creationTimestamp: null
@@ -342,34 +357,30 @@ func TestEncode(t *testing.T) {
oldCRDObj := &CRDOldVersion{TestString: "foobar"}
newCRDObj := &CRDNewVersion{OtherString: "foobar"}
tests := []struct {
- name string
- ct ContentType
- objs []runtime.Object
- expected []byte
- expectedErr bool
+ name string
+ ct content.ContentType
+ objs []runtime.Object
+ want []byte
+ wantErr error
}{
- {"simple yaml", ContentTypeYAML, []runtime.Object{simpleObj}, oneSimple, false},
- {"complex yaml", ContentTypeYAML, []runtime.Object{complexObj}, oneComplex, false},
- {"both simple and complex yaml", ContentTypeYAML, []runtime.Object{simpleObj, complexObj}, simpleAndComplex, false},
- {"simple json", ContentTypeJSON, []runtime.Object{simpleObj}, simpleJSON, false},
- {"complex json", ContentTypeJSON, []runtime.Object{complexObj}, complexJSON, false},
- {"old CRD yaml", ContentTypeYAML, []runtime.Object{oldCRDObj}, oldCRDNoComments, false},
- {"new CRD yaml", ContentTypeYAML, []runtime.Object{newCRDObj}, newCRDNoComments, false},
+ {"simple yaml", content.ContentTypeYAML, []runtime.Object{simpleObj}, oneSimple, nil},
+ {"complex yaml", content.ContentTypeYAML, []runtime.Object{complexObj}, oneComplex, nil},
+ {"both simple and complex yaml", content.ContentTypeYAML, []runtime.Object{simpleObj, complexObj}, simpleAndComplex, nil},
+ {"simple json", content.ContentTypeJSON, []runtime.Object{simpleObj}, simpleJSON, nil},
+ {"complex json", content.ContentTypeJSON, []runtime.Object{complexObj}, complexJSON, nil},
+ {"old CRD yaml", content.ContentTypeYAML, []runtime.Object{oldCRDObj}, oldCRDNoComments, nil},
+ {"new CRD yaml", content.ContentTypeYAML, []runtime.Object{newCRDObj}, newCRDNoComments, nil},
//{"no-conversion simple", defaultEncoder, &runtimetest.ExternalSimple{TestString: "foo"}, simpleJSON, false},
//{"support internal", defaultEncoder, []runtime.Object{simpleObj}, []byte(`{"testString":"foo"}` + "\n"), false},
}
for _, rt := range tests {
t.Run(rt.name, func(t2 *testing.T) {
- buf := new(bytes.Buffer)
- actualErr := defaultEncoder.Encode(NewFrameWriter(rt.ct, buf), rt.objs...)
- actual := buf.Bytes()
- if (actualErr != nil) != rt.expectedErr {
- t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actualErr != nil, actualErr)
- }
- if !bytes.Equal(actual, rt.expected) {
- t2.Errorf("expected %q but actual %q", string(rt.expected), string(actual))
- }
+ var buf bytes.Buffer
+ cw := content.ToBuffer(&buf, content.WithContentType(rt.ct))
+ err := defaultEncoder.Encode(frame.NewRecognizingWriter(cw), rt.objs...)
+ assert.ErrorIs(t2, err, rt.wantErr)
+ assert.Equal(t2, string(rt.want), buf.String())
})
}
}
@@ -381,8 +392,8 @@ func TestDecode(t *testing.T) {
data []byte
doDefaulting bool
doConversion bool
- expected runtime.Object
- expectedErr bool
+ want runtime.Object
+ wantErr bool
}{
{"old CRD hub conversion", oldCRD, false, true, &CRDNewVersion{newCRDMeta, metav1.ObjectMeta{}, "Old string foobar"}, false},
{"old CRD no conversion", oldCRD, false, false, &CRDOldVersion{oldCRDMeta, metav1.ObjectMeta{}, "foobar"}, false},
@@ -401,16 +412,12 @@ func TestDecode(t *testing.T) {
for _, rt := range tests {
t.Run(rt.name, func(t2 *testing.T) {
- obj, actual := ourserializer.Decoder(
- WithDefaultsDecode(rt.doDefaulting),
- WithConvertToHubDecode(rt.doConversion),
- ).Decode(NewYAMLFrameReader(FromBytes(rt.data)))
- if (actual != nil) != rt.expectedErr {
- t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual)
- }
- if rt.expected != nil && !reflect.DeepEqual(obj, rt.expected) {
- t2.Errorf("expected %#v but actual %#v", rt.expected, obj)
- }
+ obj, err := ourserializer.Decoder(
+ DefaultAtDecode(rt.doDefaulting),
+ ConvertToHub(rt.doConversion),
+ ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data)))
+ assert.Equal(t2, rt.wantErr, err != nil)
+ assert.Equal(t2, rt.want, obj)
})
}
}
@@ -433,8 +440,8 @@ func TestDecodeInto(t *testing.T) {
{"complex external", oneComplex, false, &runtimetest.ExternalComplex{}, &runtimetest.ExternalComplex{TypeMeta: complexv1Meta, String: "bar"}, false},
{"defaulted complex external", oneComplex, true, &runtimetest.ExternalComplex{}, &runtimetest.ExternalComplex{TypeMeta: complexv1Meta, String: "bar", Integer64: 5}, false},
{"defaulted complex internal", oneComplex, true, &runtimetest.InternalComplex{}, &runtimetest.InternalComplex{String: "bar", Integer64: 5}, false},
- {"decode unknown obj into unknown", unrecognizedGVK, false, &runtime.Unknown{}, newUnknown(unknownMeta, unrecognizedGVK), false},
- {"decode known obj into unknown", oneComplex, false, &runtime.Unknown{}, newUnknown(complexv1Meta, oneComplex), false},
+ {"decode unknown obj into unknown", unrecognizedGVK, false, &runtime.Unknown{}, newUnknown(unknownMeta, bytes.TrimPrefix(unrecognizedGVK, yamlSep)), false},
+ {"decode known obj into unknown", oneComplex, false, &runtime.Unknown{}, newUnknown(complexv1Meta, bytes.TrimPrefix(oneComplex, yamlSep)), false},
{"no unknown fields", simpleUnknownField, false, &runtimetest.InternalSimple{}, nil, true},
{"no duplicate fields", simpleDuplicateField, false, &runtimetest.InternalSimple{}, nil, true},
{"no unrecognized API version", unrecognizedVersion, false, &runtimetest.InternalSimple{}, nil, true},
@@ -444,8 +451,8 @@ func TestDecodeInto(t *testing.T) {
t.Run(rt.name, func(t2 *testing.T) {
actual := ourserializer.Decoder(
- WithDefaultsDecode(rt.doDefaulting),
- ).DecodeInto(NewYAMLFrameReader(FromBytes(rt.data)), rt.obj)
+ DefaultAtDecode(rt.doDefaulting),
+ ).DecodeInto(frame.NewYAMLReader(content.FromBytes(rt.data)), rt.obj)
if (actual != nil) != rt.expectedErr {
t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual)
}
@@ -484,9 +491,9 @@ func TestDecodeAll(t *testing.T) {
for _, rt := range tests {
t.Run(rt.name, func(t2 *testing.T) {
objs, actual := ourserializer.Decoder(
- WithDefaultsDecode(rt.doDefaulting),
- WithListElementsDecoding(rt.listSplit),
- ).DecodeAll(NewYAMLFrameReader(FromBytes(rt.data)))
+ DefaultAtDecode(rt.doDefaulting),
+ DecodeListElements(rt.listSplit),
+ ).DecodeAll(frame.NewYAMLReader(content.FromBytes(rt.data)))
if (actual != nil) != rt.expectedErr {
t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual)
}
@@ -519,7 +526,7 @@ func TestDecodeUnknown(t *testing.T) {
expected runtime.Object
expectedErr bool
}{
- {"Decode unrecognized kinds into runtime.Unknown", unrecognizedGVK, true, newUnknown(unknownMeta, unrecognizedGVK), false},
+ {"Decode unrecognized kinds into runtime.Unknown", unrecognizedGVK, true, newUnknown(unknownMeta, bytes.TrimPrefix(unrecognizedGVK, yamlSep)), false},
{"Decode known kinds into known structs", oneComplex, true, &runtimetest.ExternalComplex{TypeMeta: complexv1Meta, String: "bar"}, false},
{"No support for unrecognized", unrecognizedGVK, false, nil, true},
}
@@ -527,8 +534,8 @@ func TestDecodeUnknown(t *testing.T) {
for _, rt := range tests {
t.Run(rt.name, func(t2 *testing.T) {
obj, actual := ourserializer.Decoder(
- WithUnknownDecode(rt.unknown),
- ).Decode(NewYAMLFrameReader(FromBytes(rt.data)))
+ DecodeUnknown(rt.unknown),
+ ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data)))
if (actual != nil) != rt.expectedErr {
t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual)
}
@@ -543,15 +550,15 @@ func TestRoundtrip(t *testing.T) {
tests := []struct {
name string
data []byte
- ct ContentType
+ ct content.ContentType
gv *schema.GroupVersion // use a specific groupversion if set. if nil, then use the default Encode
}{
- {"simple yaml", oneSimple, ContentTypeYAML, nil},
- {"complex yaml", oneComplex, ContentTypeYAML, nil},
- {"simple json", simpleJSON, ContentTypeJSON, nil},
- {"complex json", complexJSON, ContentTypeJSON, nil},
- {"crd with objectmeta & comments", oldCRD, ContentTypeYAML, &ext1gv}, // encode as v1alpha1
- {"unknown object", unrecognizedGVK, ContentTypeYAML, nil},
+ {"simple yaml", oneSimple, content.ContentTypeYAML, nil},
+ {"complex yaml", oneComplex, content.ContentTypeYAML, nil},
+ {"simple json", simpleJSON, content.ContentTypeJSON, nil},
+ {"complex json", complexJSON, content.ContentTypeJSON, nil},
+ {"crd with objectmeta & comments", oldCRD, content.ContentTypeYAML, &ext1gv}, // encode as v1alpha1
+ {"unknown object", unrecognizedGVK, content.ContentTypeYAML, nil},
// TODO: Maybe a unit test (case) for a type with ObjectMeta embedded as a pointer being nil
// TODO: Make sure that the Encode call (with comments support) doesn't mutate the object state
// i.e. doesn't remove the annotation after use so multiple similar encode calls work.
@@ -560,19 +567,20 @@ func TestRoundtrip(t *testing.T) {
for _, rt := range tests {
t.Run(rt.name, func(t2 *testing.T) {
obj, err := ourserializer.Decoder(
- WithConvertToHubDecode(true),
- WithCommentsDecode(true),
- WithUnknownDecode(true),
- ).Decode(NewYAMLFrameReader(FromBytes(rt.data)))
+ ConvertToHub(true),
+ PreserveCommentsStrict,
+ DecodeUnknown(true),
+ ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data)))
if err != nil {
t2.Errorf("unexpected decode error: %v", err)
return
}
- buf := new(bytes.Buffer)
+ var buf bytes.Buffer
+ cw := content.ToBuffer(&buf, content.WithContentType(rt.ct))
if rt.gv == nil {
- err = defaultEncoder.Encode(NewFrameWriter(rt.ct, buf), obj)
+ err = defaultEncoder.Encode(frame.NewRecognizingWriter(cw), obj)
} else {
- err = defaultEncoder.EncodeForGroupVersion(NewFrameWriter(rt.ct, buf), obj, *rt.gv)
+ err = defaultEncoder.EncodeForGroupVersion(frame.NewRecognizingWriter(cw), obj, *rt.gv)
}
actual := buf.Bytes()
if err != nil {
@@ -684,13 +692,13 @@ testString: bar
func TestListRoundtrip(t *testing.T) {
objs, err := ourserializer.Decoder(
WithCommentsDecode(true),
- ).DecodeAll(NewYAMLFrameReader(FromBytes(testList)))
+ ).DecodeAll(frame.NewYAMLReader(content.FromBytes(testList)))
if err != nil {
t.Fatal(err)
}
buf := new(bytes.Buffer)
- if err := defaultEncoder.Encode(NewFrameWriter(ContentTypeYAML, buf), objs...); err != nil {
+ if err := defaultEncoder.Encode(frame.NewWriter(content.ContentTypeYAML, buf), objs...); err != nil {
t.Fatal(err)
}
actual := buf.Bytes()
diff --git a/pkg/serializer/utils.go b/pkg/serializer/utils.go
new file mode 100644
index 00000000..88ce6d82
--- /dev/null
+++ b/pkg/serializer/utils.go
@@ -0,0 +1,163 @@
+package serializer
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// LockedScheme describes a shared scheme that should be locked before writing, and unlocked
+// after writing. Reading can be done safely without any locking.
+type LockedScheme interface {
+ Scheme() *runtime.Scheme
+ SchemeLock()
+ SchemeUnlock()
+}
+
+func newLockedScheme(scheme *runtime.Scheme) LockedScheme {
+ return &lockedScheme{scheme, &sync.Mutex{}}
+}
+
+type lockedScheme struct {
+ scheme *runtime.Scheme
+ mu *sync.Mutex
+}
+
+func (s *lockedScheme) Scheme() *runtime.Scheme {
+ return s.scheme
+}
+
+func (s *lockedScheme) SchemeLock() {
+ s.mu.Lock()
+}
+
+func (s *lockedScheme) SchemeUnlock() {
+ s.mu.Unlock()
+}
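+
+// A minimal usage sketch (illustrative only, not part of this change): writers
+// take the scheme lock around mutations; readers access the scheme lock-free.
+//
+//   ls := newLockedScheme(runtime.NewScheme())
+//   ls.SchemeLock()
+//   _ = ls.Scheme().SetVersionPriority(someGV) // someGV is a placeholder; any scheme mutation goes here
+//   ls.SchemeUnlock()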
+
+func GVKForObject(scheme *runtime.Scheme, obj runtime.Object) (schema.GroupVersionKind, error) {
+ // Safety check: obj and its ObjectKind must not be nil
+ if obj == nil || obj.GetObjectKind() == nil {
+ return schema.GroupVersionKind{}, fmt.Errorf("GVKForObject: obj or obj.GetObjectKind() must not be nil")
+ }
+
+ // If this is a runtime.Unknown object, return the GVK stored in TypeMeta
+ if gvk := obj.GetObjectKind().GroupVersionKind(); IsUnknown(obj) && !gvk.Empty() {
+ return gvk, nil
+ }
+
+ // Special case: Allow objects registered with multiple versions, when the caller is
+ // specific about which version they want populated.
+ // This is needed essentially for working around that there are specific K8s types (structs)
+ // that have been registered with multiple GVKs (e.g. a Deployment struct in both apps & extensions)
+ // TODO: Maybe there is a better way to solve this? Remove unwanted entries from the scheme typeToGVK
+ // map manually?
+ gvks, _, _ := scheme.ObjectKinds(obj)
+ if len(gvks) > 1 {
+ // If we have a configuration with more than one gvk for the same object,
+ // check the set GVK on the object to "choose" the right one, if exists in the list
+ setGVK := obj.GetObjectKind().GroupVersionKind()
+ if !setGVK.Empty() {
+ for _, gvk := range gvks {
+ if EqualsGVK(setGVK, gvk) {
+ return gvk, nil
+ }
+ }
+ }
+ }
+
+ // TODO: Should we just copy-paste this one, or move it into k8s core to avoid importing controller-runtime
+ // only for this function?
+ return apiutil.GVKForObject(obj, scheme)
+}
+
+// GVKForList returns the GroupVersionKind for the items in a given List type.
+// In the case of Unstructured or PartialObjectMetadata, it is required that this
+// information is already set in TypeMeta. The "List" suffix is never returned.
+func GVKForList(obj client.ObjectList, scheme *runtime.Scheme) (schema.GroupVersionKind, error) {
+ // First, get the GVK as normal.
+ gvk, err := GVKForObject(scheme, obj)
+ if err != nil {
+ return schema.GroupVersionKind{}, err
+ }
+ // Make sure this is a list type, i.e. it has an "Items" field.
+ isList := meta.IsListType(obj)
+ if !isList {
+ return schema.GroupVersionKind{}, ErrObjectIsNotList
+ }
+ // Make sure the returned GVK never ends in List.
+ gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
+ return gvk, nil
+}
+
+// PreferredVersionForGroup returns the most preferred version of a group in the scheme.
+// In order to tell the scheme what your preferred ordering is, use scheme.SetVersionPriority().
+func PreferredVersionForGroup(scheme *runtime.Scheme, groupName string) (schema.GroupVersion, error) {
+ // Get the prioritized versions for the given group
+ gvs := scheme.PrioritizedVersionsForGroup(groupName)
+ if len(gvs) < 1 {
+ return schema.GroupVersion{}, fmt.Errorf("expected some version to be registered for group %s", groupName)
+ }
+ // Use the first, preferred, (external) version
+ return gvs[0], nil
+}
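+
+// Illustrative sketch (scheme, gvBeta and gvAlpha are placeholders; the two
+// GroupVersions are assumed registered with the scheme): after prioritization,
+// the first entry wins.
+//
+//   _ = scheme.SetVersionPriority(gvBeta, gvAlpha)
+//   gv, _ := PreferredVersionForGroup(scheme, gvBeta.Group) // gv == gvBeta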
+
+// EqualsGK returns true if gk1 and gk2 have the same fields.
+func EqualsGK(gk1, gk2 schema.GroupKind) bool {
+ return gk1.Group == gk2.Group && gk1.Kind == gk2.Kind
+}
+
+// EqualsGVK returns true if gvk1 and gvk2 have the same fields.
+func EqualsGVK(gvk1, gvk2 schema.GroupVersionKind) bool {
+ return EqualsGK(gvk1.GroupKind(), gvk2.GroupKind()) && gvk1.Version == gvk2.Version
+}
+
+func IsUnknown(obj runtime.Object) bool {
+ _, isUnknown := obj.(*runtime.Unknown)
+ return isUnknown
+}
+
+func IsPartialObject(obj runtime.Object) bool {
+ _, isPartial := obj.(*metav1.PartialObjectMetadata)
+ return isPartial
+}
+
+func IsPartialObjectList(obj runtime.Object) bool {
+ _, isPartialList := obj.(*metav1.PartialObjectMetadataList)
+ return isPartialList
+}
+
+// IsUnstructured checks if obj is runtime.Unstructured
+func IsUnstructured(obj runtime.Object) bool {
+ _, isUnstructured := obj.(runtime.Unstructured)
+ return isUnstructured
+}
+
+// IsUnstructuredList checks if obj is *unstructured.UnstructuredList
+func IsUnstructuredList(obj runtime.Object) bool {
+ _, isUnstructuredList := obj.(*unstructured.UnstructuredList)
+ return isUnstructuredList
+}
+
+// IsNonConvertible returns true for unstructured, partial and unknown objects
+// that should not be converted.
+func IsNonConvertible(obj runtime.Object) bool {
+ // TODO: Should Lists also be marked non-convertible?
+ // IsUnstructured also covers IsUnstructuredList -- *UnstructuredList implements runtime.Unstructured
+ return IsUnstructured(obj) || IsPartialObject(obj) || IsPartialObjectList(obj) || IsUnknown(obj)
+}
+
+// IsTyped returns true if the object is typed, i.e. registered with the given
+// scheme and not unversioned.
+func IsTyped(obj runtime.Object, scheme *runtime.Scheme) bool {
+ _, isUnversioned, err := scheme.ObjectKinds(obj)
+ return !isUnversioned && err == nil
+}
diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go
new file mode 100644
index 00000000..eee027f6
--- /dev/null
+++ b/pkg/storage/backend/backend.go
@@ -0,0 +1,415 @@
+package backend
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/frame"
+ "github.com/weaveworks/libgitops/pkg/serializer"
+ "github.com/weaveworks/libgitops/pkg/storage"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "go.uber.org/multierr"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/sets"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+var (
+ // ErrCannotSaveMetadata is returned if the user tries to save metadata-only objects
+ ErrCannotSaveMetadata = errors.New("cannot save (Create|Update|Patch) *metav1.PartialObjectMetadata")
+ // ErrNameRequired is returned when .metadata.name is unset
+ // TODO: Support generateName?
+ ErrNameRequired = errors.New(".metadata.name is required")
+)
+
+// TODO: Make a *core.Unknown that has
+// 1. TypeMeta
+// 2. DeepCopies (for Object compatibility),
+// 3. ObjectMeta
+// 4. Spec { Data []byte, ContentType ContentType, Object interface{} }
+// 5. Status { Data []byte, ContentType ContentType, Object interface{} }
+// TODO: Need to make sure we never write this internal struct to disk (MarshalJSON error?)
+
+// Create an alias for the Object type
+type Object = client.Object
+
+type Accessors interface {
+ Storage() storage.Storage
+ NamespaceEnforcer() NamespaceEnforcer
+ Encoder() serializer.Encoder
+ Decoder() serializer.Decoder
+}
+
+type WriteAccessors interface {
+ Validator() Validator
+ StorageVersioner() StorageVersioner
+}
+
+type Reader interface {
+ Accessors
+
+ Get(ctx context.Context, obj Object) error
+ storage.Lister
+}
+
+type Writer interface {
+ Accessors
+ WriteAccessors
+
+ Create(ctx context.Context, obj Object) error
+ Update(ctx context.Context, obj Object) error
+ Delete(ctx context.Context, obj Object) error
+}
+
+type StatusWriter interface {
+ Accessors
+ WriteAccessors
+
+ UpdateStatus(ctx context.Context, obj Object) error
+}
+
+// Backend combines the Reader and Writer interfaces for a fully-functioning backend
+// implementation; used by the Client interface. Backend can be thought of as the "API Server"
+// logic in between a "frontend" Client and "document" Storage. In other words, the backend
+// handles serialization, versioning, validation, and policy enforcement.
+//
+// Any callable function should immediately abort if the given context from the client
+// has expired, so that an invalid context doesn't "leak down" to the Storage system.
+type Backend interface {
+ Reader
+ Writer
+ StatusWriter
+}
+
+type ChangeOperation string
+
+const (
+ ChangeOperationCreate ChangeOperation = "create"
+ ChangeOperationUpdate ChangeOperation = "update"
+ ChangeOperationDelete ChangeOperation = "delete"
+)
+
+type Validator interface {
+ ValidateChange(ctx context.Context, backend Reader, op ChangeOperation, obj Object) error
+}
+
+// NewGeneric creates a new generic Backend for the given underlying Storage for storing the
+// objects once serialized, encoders and decoders for (de)serialization, the NamespaceEnforcer
+// for enforcing a namespacing policy, the StorageVersioner for telling the encoder which
+// version (of possibly many) to use when encoding, and optionally, a Validator.
+//
+// All parameters except the validator are mandatory.
+func NewGeneric(
+ storage storage.Storage,
+ encoder serializer.Encoder,
+ decoder serializer.Decoder,
+ enforcer NamespaceEnforcer,
+ versioner StorageVersioner,
+ validator Validator,
+) (*Generic, error) {
+ if storage == nil {
+ return nil, fmt.Errorf("storage is mandatory")
+ }
+ if encoder == nil {
+ return nil, fmt.Errorf("encoder is mandatory")
+ }
+ if decoder == nil {
+ return nil, fmt.Errorf("decoder is mandatory")
+ }
+ if enforcer == nil {
+ return nil, fmt.Errorf("enforcer is mandatory")
+ }
+ if versioner == nil {
+ return nil, fmt.Errorf("versioner is mandatory")
+ }
+ return &Generic{
+ // It shouldn't matter if we use the encoder's or decoder's SchemeLock
+ LockedScheme: encoder.GetLockedScheme(),
+ encoder: encoder,
+ decoder: decoder,
+
+ storage: storage,
+ enforcer: enforcer,
+ validator: validator,
+ versioner: versioner,
+ }, nil
+}
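+
+// A hedged wiring sketch (myStorage, myEncoder, myDecoder, myEnforcer,
+// myValidator and scheme are placeholders, not part of this change):
+//
+//   b, err := NewGeneric(
+//       myStorage,                                // storage.Storage
+//       myEncoder,                                // serializer.Encoder
+//       myDecoder,                                // serializer.Decoder
+//       myEnforcer,                               // e.g. GenericNamespaceEnforcer{...}
+//       SchemePreferredVersioner{Scheme: scheme}, // StorageVersioner
+//       myValidator,                              // optional; may be nil
+//   )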
+
+var _ Backend = &Generic{}
+
+type Generic struct {
+ serializer.LockedScheme
+ encoder serializer.Encoder
+ decoder serializer.Decoder
+
+ storage storage.Storage
+ enforcer NamespaceEnforcer
+ validator Validator
+ versioner StorageVersioner
+}
+
+func (b *Generic) Encoder() serializer.Encoder {
+ return b.encoder
+}
+
+func (b *Generic) Decoder() serializer.Decoder {
+ return b.decoder
+}
+
+func (b *Generic) Storage() storage.Storage {
+ return b.storage
+}
+
+func (b *Generic) NamespaceEnforcer() NamespaceEnforcer {
+ return b.enforcer
+}
+
+func (b *Generic) Validator() Validator {
+ return b.validator
+}
+
+func (b *Generic) StorageVersioner() StorageVersioner {
+ return b.versioner
+}
+
+func (b *Generic) Get(ctx context.Context, obj Object) error {
+ // If the context has been cancelled or timed out, directly return an error
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info.
+ id, err := b.idForObj(ctx, obj)
+ if err != nil {
+ return err
+ }
+ // Read the underlying bytes
+ data, err := b.storage.Read(ctx, id)
+ if err != nil {
+ return err
+ }
+ // Get the right content type for the data
+ ct, err := b.storage.ContentType(ctx, id)
+ if err != nil {
+ return err
+ }
+
+ // TODO: Check if the decoder "replaces" already-set fields or "leaks" old data?
+ // TODO: Here it'd be great with a frame.FromSingleBytes method
+ return b.decoder.DecodeInto(frame.NewSingleReader(ct, content.FromBytes(data)), obj)
+}
+
+// ListGroupKinds returns all GroupKinds known by the implementation at that
+// time. The set might vary over time as data is created and deleted, and
+// should not be treated as a universal "what types could possibly exist",
+// but rather as "what are the GroupKinds of the objects that currently
+// exist?". That said, specific implementations might honor this guideline
+// differently. This might be used for introspection into the system.
+func (b *Generic) ListGroupKinds(ctx context.Context) ([]core.GroupKind, error) {
+ // If the context has been cancelled or timed out, directly return an error
+ if err := ctx.Err(); err != nil {
+ return nil, err
+ }
+
+ return b.storage.ListGroupKinds(ctx)
+}
+
+// ListNamespaces lists the available namespaces for the given GroupKind.
+// This function shall only be called for namespaced objects, it is up to
+// the caller to make sure they do not call this method for root-spaced
+// objects; for that the behavior is undefined (but returning an error
+// is recommended).
+func (b *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) {
+ // If the context has been cancelled or timed out, directly return an error
+ if err := ctx.Err(); err != nil {
+ return nil, err
+ }
+
+ return b.storage.ListNamespaces(ctx, gk)
+}
+
+// ListObjectIDs returns a set of object IDs (names, with optionally, the namespace).
+// For namespaced GroupKinds, the caller must provide a namespace, and for
+// root-spaced GroupKinds, the caller must not. When namespaced, this function
+// must only return object IDs for that given namespace.
+func (b *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) {
+ // If the context has been cancelled or timed out, directly return an error
+ if err := ctx.Err(); err != nil {
+ return nil, err
+ }
+
+ return b.storage.ListObjectIDs(ctx, gk, namespace)
+}
+
+func (b *Generic) Create(ctx context.Context, obj Object) error {
+ // If the context has been cancelled or timed out, directly return an error
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ // We must never save metadata-only structs
+ if serializer.IsPartialObject(obj) {
+ return ErrCannotSaveMetadata
+ }
+
+ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info.
+ id, err := b.idForObj(ctx, obj)
+ if err != nil {
+ return err
+ }
+
+ // Do not create the object if it already exists.
+ exists, err := b.storage.Exists(ctx, id)
+ if err != nil {
+ return err
+ }
+ if exists {
+ return core.NewErrAlreadyExists(id)
+ }
+
+ // Validate that the change is ok
+ // TODO: Don't make "upcasting" possible here
+ if b.validator != nil {
+ if err := b.validator.ValidateChange(ctx, b, ChangeOperationCreate, obj); err != nil {
+ return err
+ }
+ }
+
+ // Internal, common write shared with Update()
+ return b.write(ctx, id, obj)
+}
+}
+
+func (b *Generic) Update(ctx context.Context, obj Object) error {
+ // If the context has been cancelled or timed out, directly return an error
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ // We must never save metadata-only structs
+ if serializer.IsPartialObject(obj) {
+ return ErrCannotSaveMetadata
+ }
+
+ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info.
+ id, err := b.idForObj(ctx, obj)
+ if err != nil {
+ return err
+ }
+
+ // Require that the object already exists. If err != nil,
+ // exists == false, hence it's enough to check for !exists
+ if exists, err := b.storage.Exists(ctx, id); !exists {
+ return multierr.Combine(core.NewErrNotFound(id), err)
+ }
+
+ // Validate that the change is ok
+ // TODO: Don't make "upcasting" possible here
+ if b.validator != nil {
+ if err := b.validator.ValidateChange(ctx, b, ChangeOperationUpdate, obj); err != nil {
+ return err
+ }
+ }
+
+ // Internal, common write shared with Create()
+ return b.write(ctx, id, obj)
+}
+
+func (b *Generic) UpdateStatus(ctx context.Context, obj Object) error {
+ // If the context has been cancelled or timed out, directly return an error
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ return core.ErrNotImplemented // TODO
+}
+
+func (b *Generic) write(ctx context.Context, id core.ObjectID, obj Object) error {
+ // Get the content type of the object
+ ct, err := b.storage.ContentType(ctx, id)
+ if err != nil {
+ return err
+ }
+ // Resolve the desired storage version
+ gv, err := b.versioner.StorageVersion(id)
+ if err != nil {
+ return err
+ }
+
+ // Set creationTimestamp if not already populated
+ t := obj.GetCreationTimestamp()
+ if t.IsZero() {
+ obj.SetCreationTimestamp(metav1.Now())
+ }
+
+ var objBytes bytes.Buffer
+ // This FrameWriter works for any content type, and transparently writes to objBytes
+ fw := frame.ToSingleBuffer(ct, &objBytes)
+ // The encoder uses the ContentType given through fw, and encodes obj.
+ if err := b.encoder.EncodeForGroupVersion(fw, obj, gv); err != nil {
+ return err
+ }
+
+ return b.storage.Write(ctx, id, objBytes.Bytes())
+}
+
+func (b *Generic) Delete(ctx context.Context, obj Object) error {
+ // If the context has been cancelled or timed out, directly return an error
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+
+ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info.
+ id, err := b.idForObj(ctx, obj)
+ if err != nil {
+ return err
+ }
+
+ // Verify it did exist. If err != nil,
+ // exists == false, hence it's enough to check for !exists
+ if exists, err := b.storage.Exists(ctx, id); !exists {
+ return multierr.Combine(core.NewErrNotFound(id), err)
+ }
+
+ // Validate that the change is ok
+ // TODO: Don't make "upcasting" possible here
+ if b.validator != nil {
+ if err := b.validator.ValidateChange(ctx, b, ChangeOperationDelete, obj); err != nil {
+ return err
+ }
+ }
+
+ // Delete it from the underlying storage
+ return b.storage.Delete(ctx, id)
+}
+
+// Note: This should also work for unstructured and partial metadata objects
+func (b *Generic) idForObj(ctx context.Context, obj Object) (core.ObjectID, error) {
+ // Get the GroupVersionKind of the given object.
+ gvk, err := serializer.GVKForObject(b.Scheme(), obj)
+ if err != nil {
+ return nil, err
+ }
+
+ // Object must always have .metadata.name set
+ if len(obj.GetName()) == 0 {
+ return nil, ErrNameRequired
+ }
+
+ // Enforce the given namespace policy. This might mutate obj.
+ // TODO: disallow "upcasting" the Lister to a full-blown Storage?
+ if err := b.enforcer.EnforceNamespace(
+ ctx,
+ obj,
+ gvk,
+ b.Storage().Namespacer(),
+ b.Storage(),
+ ); err != nil {
+ return nil, err
+ }
+
+ // At this point we know name is non-empty, and the namespace field is correct,
+ // according to policy
+ return core.NewObjectID(gvk, core.ObjectKeyFromMetav1Object(obj)), nil
+}
diff --git a/pkg/storage/backend/enforcer.go b/pkg/storage/backend/enforcer.go
new file mode 100644
index 00000000..91ba2deb
--- /dev/null
+++ b/pkg/storage/backend/enforcer.go
@@ -0,0 +1,116 @@
+package backend
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/weaveworks/libgitops/pkg/storage"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+)
+
+var (
+ // ErrNoSuchNamespace means that the set of namespaces was searched in the
+ // system, but the requested namespace wasn't in that list.
+ ErrNoSuchNamespace = errors.New("no such namespace in the system")
+)
+
+// NamespaceEnforcer enforces a namespace policy for the Backend.
+type NamespaceEnforcer interface {
+ // EnforceNamespace makes sure that:
+ // a) Any namespaced object has a non-empty namespace field after this call
+ // b) Any non-namespaced object has an empty namespace field after this call
+ // c) The applicable namespace policy of the user's choosing is enforced (e.g.
+ //    that only certain namespaces are valid).
+ //
+ // This call is allowed to mutate obj. gvk represents the GroupVersionKind
+ // of obj. The namespacer can be used to figure out if the given object is
+ // namespaced or not. The given lister might be used to list object IDs,
+ // or existing namespaces in the system.
+ //
+ // See GenericNamespaceEnforcer for an example implementation, or
+ // pkg/storage/kube.NewNamespaceEnforcer() for a sample application.
+ EnforceNamespace(ctx context.Context, obj Object, gvk core.GroupVersionKind, namespacer storage.Namespacer, lister storage.Lister) error
+}
+
+// GenericNamespaceEnforcer is a NamespaceEnforcer that:
+// a) sets a default namespace for namespaced objects that have
+// the namespace field left empty
+// b) makes sure non-namespaced objects do not have the namespace
+// field set, by pruning any previously-set value.
+// c) if NamespaceGroupKind is non-nil; lists valid Namespace objects
+// in the system (of the given GroupKind); and matches namespaced
+// objects' namespace field against the listed Namespace objects'
+// .metadata.name field.
+//
+// For an example of how to configure this enforcer in the way
+// Kubernetes itself (approximately) does, see pkg/storage/kube.
+// NewNamespaceEnforcer().
+type GenericNamespaceEnforcer struct {
+ // DefaultNamespace describes the default namespace string
+ // that should be set, if a namespaced object's namespace
+ // field is empty.
+ // +required
+ DefaultNamespace string
+ // NamespaceGroupKind describes the GroupKind for Namespace
+ // objects in the system. If non-nil, objects with such
+ // GroupKind are listed, and their .metadata.name is matched
+ // against the current object's namespace field. If nil, any
+ // namespace value is considered valid.
+ // +optional
+ NamespaceGroupKind *core.GroupKind
+}
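+
+// An illustrative configuration (values are examples only; core.GroupKind is
+// assumed to mirror schema.GroupKind): default to the "default" namespace and
+// validate namespaces against v1 Namespace objects.
+//
+//   enforcer := GenericNamespaceEnforcer{
+//       DefaultNamespace:   "default",
+//       NamespaceGroupKind: &core.GroupKind{Group: "", Kind: "Namespace"},
+//   }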
+
+func (e GenericNamespaceEnforcer) EnforceNamespace(ctx context.Context, obj Object, gvk core.GroupVersionKind, namespacer storage.Namespacer, lister storage.Lister) error {
+ // Get namespacing info
+ namespaced, err := namespacer.IsNamespaced(gvk.GroupKind())
+ if err != nil {
+ return err
+ }
+
+ // Enforce generic rules
+ ns := obj.GetNamespace()
+ if !namespaced {
+ // If a namespace was set, it must be sanitized, as non-namespaced
+ // resources must have namespace field empty.
+ if len(ns) != 0 {
+ obj.SetNamespace("")
+ }
+ return nil
+ }
+ // The resource is namespaced.
+ // If it is empty, set it to the default namespace.
+ if len(ns) == 0 {
+ // Verify that DefaultNamespace is non-empty
+ if len(e.DefaultNamespace) == 0 {
+ return fmt.Errorf("GenericNamespaceEnforcer.DefaultNamespace is mandatory: %w", core.ErrInvalidParameter)
+ }
+ // Mutate obj and set the namespace field to the default, then return
+ obj.SetNamespace(e.DefaultNamespace)
+ return nil
+ }
+
+ // If the namespace field is set, but NamespaceGroupKind is
+ // nil, it means that any non-empty namespace value is
+ // valid.
+ if e.NamespaceGroupKind == nil {
+ return nil
+ }
+
+ // However, if a Namespace GroupKind was given, look it up using
+ // the lister, and verify its .metadata.name matches the given
+ // namespace value.
+ objIDs, err := lister.ListObjectIDs(ctx, *e.NamespaceGroupKind, "")
+ if err != nil {
+ return err
+ }
+ // Loop through the IDs, and try to match it against the set ns
+ for _, id := range objIDs.List() {
+ if id.ObjectKey().Name == ns {
+ // Found the namespace; this is a valid setting
+ return nil
+ }
+ }
+ // The set namespace doesn't belong to the set of valid namespaces, error
+ return fmt.Errorf("%w: %q", ErrNoSuchNamespace, ns)
+}
diff --git a/pkg/storage/backend/versioner.go b/pkg/storage/backend/versioner.go
new file mode 100644
index 00000000..93b18934
--- /dev/null
+++ b/pkg/storage/backend/versioner.go
@@ -0,0 +1,31 @@
+package backend
+
+import (
+ "fmt"
+
+ "github.com/weaveworks/libgitops/pkg/serializer"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// StorageVersioner is an interface that determines what version the Object
+// with the given ID should be serialized as.
+type StorageVersioner interface {
+ StorageVersion(id core.ObjectID) (core.GroupVersion, error)
+}
+
+// SchemePreferredVersioner uses the prioritization information in the runtime.Scheme to
+// determine what the preferred version should be. The caller is responsible for
+// registering this information with the scheme using scheme.SetVersionPriority() before
+// using this StorageVersioner. If SetVersionPriority has not been run, the version returned
+// is completely arbitrary.
+type SchemePreferredVersioner struct {
+ Scheme *runtime.Scheme
+}
+
+func (v SchemePreferredVersioner) StorageVersion(id core.ObjectID) (core.GroupVersion, error) {
+ if v.Scheme == nil {
+ return core.GroupVersion{}, fmt.Errorf("programmer error: SchemePreferredVersioner.Scheme must not be nil")
+ }
+ return serializer.PreferredVersionForGroup(v.Scheme, id.GroupKind().Group)
+}
diff --git a/pkg/storage/cache/cache.go b/pkg/storage/cache/cache.go
deleted file mode 100644
index 11a4991b..00000000
--- a/pkg/storage/cache/cache.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package cache
-
-/*
-
-TODO: Revisit if we need this file/package in the future.
-
-import (
- log "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/serializer"
- "github.com/weaveworks/libgitops/pkg/storage"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// Cache is an intermediate caching layer, which conforms to Storage
-// Typically you back the cache with an actual storage
-type Cache interface {
- storage.Storage
- // Flush is used to write the state of the entire cache to storage
- // Warning: this is a very expensive operation
- Flush() error
-}
-
-type cache struct {
- // storage is the backing Storage for the cache
- // used to look up non-cached Objects
- storage storage.Storage
-
- // index caches the Objects by GroupVersionKind and UID
- // This guarantees uniqueness when looking up a specific Object
- index *index
-}
-
-var _ Cache = &cache{}
-
-func NewCache(backingStorage storage.Storage) Cache {
- c := &cache{
- storage: backingStorage,
- index: newIndex(backingStorage),
- }
-
- return c
-}
-
-func (s *cache) Serializer() serializer.Serializer {
- return s.storage.Serializer()
-}
-
-func (c *cache) New(gvk schema.GroupVersionKind) (runtime.Object, error) {
- // Request the storage to create the Object. The
- // newly generated Object has not got an UID which
- // is required for indexing, so just return it
- // without storing it into the cache
- return c.storage.New(gvk)
-}
-
-func (c *cache) Get(gvk schema.GroupVersionKind, uid runtime.UID) (obj runtime.Object, err error) {
- log.Tracef("cache: Get %s with UID %q", gvk.Kind, uid)
-
- // If the requested Object resides in the cache, return it
- if obj, err = c.index.loadByID(gvk, uid); err != nil || obj != nil {
- return
- }
-
- // Request the Object from the storage
- obj, err = c.storage.Get(gvk, uid)
-
- // If no errors occurred, cache it
- if err == nil {
- err = c.index.store(obj)
- }
-
- return
-}
-
-func (c *cache) GetMeta(gvk schema.GroupVersionKind, uid runtime.UID) (obj runtime.Object, err error) {
- log.Tracef("cache: GetMeta %s with UID %q", gvk.Kind, uid)
-
- obj, err = c.storage.GetMeta(gvk, uid)
-
- // If no errors occurred while loading, store the Object in the cache
- if err == nil {
- err = c.index.storeMeta(obj)
- }
-
- return
-}
-
-func (c *cache) Set(gvk schema.GroupVersionKind, obj runtime.Object) error {
- log.Tracef("cache: Set %s with UID %q", gvk.Kind, obj.GetUID())
-
- // Store the changed Object in the cache
- if err := c.index.store(obj); err != nil {
- return err
- }
-
- // TODO: For now the cache always flushes, we might add automatic flushing later
- return c.storage.Set(gvk, obj)
-}
-
-func (c *cache) Patch(gvk schema.GroupVersionKind, uid runtime.UID, patch []byte) error {
- // TODO: For now patches are always flushed, the cache will load the updated Object on-demand on access
- return c.storage.Patch(gvk, uid, patch)
-}
-
-func (c *cache) Delete(gvk schema.GroupVersionKind, uid runtime.UID) error {
- log.Tracef("cache: Delete %s with UID %q", gvk.Kind, uid)
-
- // Delete the given Object from the cache and storage
- c.index.delete(gvk, uid)
- return c.storage.Delete(gvk, uid)
-}
-
-type listFunc func(gvk schema.GroupVersionKind) ([]runtime.Object, error)
-type cacheStoreFunc func([]runtime.Object) error
-
-// list is a common handler for List and ListMeta
-func (c *cache) list(gvk schema.GroupVersionKind, slf, clf listFunc, csf cacheStoreFunc) (objs []runtime.Object, err error) {
- var storageCount uint64
- if storageCount, err = c.storage.Count(gvk); err != nil {
- return
- }
-
- if c.index.count(gvk) != storageCount {
- log.Tracef("cache: miss when listing: %s", gvk)
- // If the cache doesn't track all of the Objects, request them from the storage
- if objs, err = slf(gvk); err != nil {
- // If no errors occurred, store the Objects in the cache
- err = csf(objs)
- }
- } else {
- log.Tracef("cache: hit when listing: %s", gvk)
- // If the cache tracks everything, return the cache's contents
- objs, err = clf(gvk)
- }
-
- return
-}
-
-func (c *cache) List(gvk schema.GroupVersionKind) ([]runtime.Object, error) {
- return c.list(gvk, c.storage.List, c.index.list, c.index.storeAll)
-}
-
-func (c *cache) ListMeta(gvk schema.GroupVersionKind) ([]runtime.Object, error) {
- return c.list(gvk, c.storage.ListMeta, c.index.listMeta, c.index.storeAllMeta)
-}
-
-func (c *cache) Count(gvk schema.GroupVersionKind) (uint64, error) {
- // The cache is transparent about how many items it has cached
- return c.storage.Count(gvk)
-}
-
-func (c *cache) Checksum(gvk schema.GroupVersionKind, uid runtime.UID) (string, error) {
- // The cache is transparent about the checksums
- return c.storage.Checksum(gvk, uid)
-}
-
-func (c *cache) RawStorage() storage.RawStorage {
- return c.storage.RawStorage()
-}
-
-func (c *cache) Close() error {
- return c.storage.Close()
-}
-
-func (c *cache) Flush() error {
- // Load the entire cache
- allObjects, err := c.index.loadAll()
- if err != nil {
- return err
- }
-
- for _, obj := range allObjects {
- // Request the storage to save each Object
- if err := c.storage.Set(obj); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// PartialObjectFrom is used to create a bound PartialObjectImpl from an Object.
-// Note: This might be useful later (maybe here or maybe in pkg/runtime) if re-enable the cache
-func PartialObjectFrom(obj Object) (PartialObject, error) {
- tm, ok := obj.GetObjectKind().(*metav1.TypeMeta)
- if !ok {
- return nil, fmt.Errorf("PartialObjectFrom: Cannot cast obj to *metav1.TypeMeta, is %T", obj.GetObjectKind())
- }
- om, ok := obj.GetObjectMeta().(*metav1.ObjectMeta)
- if !ok {
- return nil, fmt.Errorf("PartialObjectFrom: Cannot cast obj to *metav1.ObjectMeta, is %T", obj.GetObjectMeta())
- }
- return &PartialObjectImpl{tm, om}, nil
-}
-
-*/
diff --git a/pkg/storage/cache/index.go b/pkg/storage/cache/index.go
deleted file mode 100644
index 326014f3..00000000
--- a/pkg/storage/cache/index.go
+++ /dev/null
@@ -1,156 +0,0 @@
-package cache
-
-/*
-
-TODO: Revisit if we need this file/package in the future.
-
-import (
- log "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/storage"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-type index struct {
- storage storage.Storage
- objects map[schema.GroupVersionKind]map[runtime.UID]*cacheObject
-}
-
-func newIndex(storage storage.Storage) *index {
- return &index{
- storage: storage,
- objects: make(map[schema.GroupVersionKind]map[runtime.UID]*cacheObject),
- }
-}
-
-func (i *index) loadByID(gvk schema.GroupVersionKind, uid runtime.UID) (runtime.Object, error) {
- if uids, ok := i.objects[gvk]; ok {
- if obj, ok := uids[uid]; ok {
- log.Tracef("index: cache hit for %s with UID %q", gvk.Kind, uid)
- return obj.loadFull()
- }
- }
-
- log.Tracef("index: cache miss for %s with UID %q", gvk.Kind, uid)
- return nil, nil
-}
-
-func (i *index) loadAll() ([]runtime.Object, error) {
- var size uint64
-
- for gvk := range i.objects {
- size += i.count(gvk)
- }
-
- all := make([]runtime.Object, 0, size)
-
- for gvk := range i.objects {
- if objects, err := i.list(gvk); err == nil {
- all = append(all, objects...)
- } else {
- return nil, err
- }
- }
-
- return all, nil
-}
-
-func store(i *index, obj runtime.Object, apiType bool) error {
- // If store is called for an invalid Object lacking an UID,
- // panic and print the stack trace. This should never happen.
- if obj.GetUID() == "" {
- panic("Attempt to cache invalid Object: missing UID")
- }
-
- co, err := newCacheObject(i.storage, obj, apiType)
- if err != nil {
- return err
- }
-
- gvk := co.object.GetObjectKind().GroupVersionKind()
-
- if _, ok := i.objects[gvk]; !ok {
- i.objects[gvk] = make(map[runtime.UID]*cacheObject)
- }
-
- log.Tracef("index: storing %s object with UID %q, meta: %t", gvk.Kind, obj.GetName(), apiType)
- i.objects[gvk][co.object.GetUID()] = co
-
- return nil
-}
-
-func (i *index) store(obj runtime.Object) error {
- return store(i, obj, false)
-}
-
-func (i *index) storeAll(objs []runtime.Object) (err error) {
- for _, obj := range objs {
- if err = i.store(obj); err != nil {
- break
- }
- }
-
- return
-}
-
-func (i *index) storeMeta(obj runtime.Object) error {
- return store(i, obj, true)
-}
-
-func (i *index) storeAllMeta(objs []runtime.Object) (err error) {
- for _, obj := range objs {
- if uids, ok := i.objects[obj.GetObjectKind().GroupVersionKind()]; ok {
- if _, ok := uids[obj.GetUID()]; ok {
- continue
- }
- }
-
- if err = i.storeMeta(obj); err != nil {
- break
- }
- }
-
- return
-}
-
-func (i *index) delete(gvk schema.GroupVersionKind, uid runtime.UID) {
- if uids, ok := i.objects[gvk]; ok {
- delete(uids, uid)
- }
-}
-
-func (i *index) count(gvk schema.GroupVersionKind) (count uint64) {
- count = uint64(len(i.objects[gvk]))
- log.Tracef("index: counted %d %s object(s)", count, gvk.Kind)
- return
-}
-
-func list(i *index, gvk schema.GroupVersionKind, apiTypes bool) ([]runtime.Object, error) {
- uids := i.objects[gvk]
- list := make([]runtime.Object, 0, len(uids))
-
- log.Tracef("index: listing %s objects, meta: %t", gvk, apiTypes)
- for _, obj := range uids {
- loadFunc := obj.loadFull
- if apiTypes {
- loadFunc = obj.loadAPI
- }
-
- if result, err := loadFunc(); err != nil {
- return nil, err
- } else {
- list = append(list, result)
- }
- }
-
- return list, nil
-}
-
-func (i *index) list(gvk schema.GroupVersionKind) ([]runtime.Object, error) {
- return list(i, gvk, false)
-}
-
-func (i *index) listMeta(gvk schema.GroupVersionKind) ([]runtime.Object, error) {
- return list(i, gvk, true)
-}
-*/
diff --git a/pkg/storage/cache/object.go b/pkg/storage/cache/object.go
deleted file mode 100644
index c0e807cf..00000000
--- a/pkg/storage/cache/object.go
+++ /dev/null
@@ -1,96 +0,0 @@
-package cache
-
-/*
-
-TODO: Revisit if we need this file/package in the future.
-
-import (
- log "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/storage"
-)
-
-type cacheObject struct {
- storage storage.Storage
- object runtime.Object
- checksum string
- apiType bool
-}
-
-func newCacheObject(s storage.Storage, object runtime.Object, apiType bool) (c *cacheObject, err error) {
- c = &cacheObject{
- storage: s,
- object: object,
- apiType: apiType,
- }
-
- if c.checksum, err = s.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil {
- c = nil
- }
-
- return
-}
-
-// loadFull returns the full Object, loading it only if it hasn't been cached before or the checksum has changed
-func (c *cacheObject) loadFull() (runtime.Object, error) {
- var checksum string
- reload := c.apiType
-
- if !reload {
- if chk, err := c.storage.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil {
- return nil, err
- } else if chk != c.checksum {
- log.Tracef("cacheObject: %q invalidated, checksum mismatch: %q -> %q", c.object.GetName(), c.checksum, chk)
- checksum = chk
- reload = true
- } else {
- log.Tracef("cacheObject: %q checksum: %q", c.object.GetName(), c.checksum)
- }
- }
-
- if reload {
- log.Tracef("cacheObject: full load triggered for %q", c.object.GetName())
- obj, err := c.storage.Get(c.object.GroupVersionKind(), c.object.GetUID())
- if err != nil {
- return nil, err
- }
-
- // Only apply the change after a successful Get
- c.object = obj
- c.apiType = false
-
- if len(checksum) > 0 {
- c.checksum = checksum
- }
- }
-
- return c.object, nil
-}
-
-// loadAPI returns the APIType of the Object, loading it only if the checksum has changed
-func (c *cacheObject) loadAPI() (runtime.Object, error) {
- if chk, err := c.storage.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil {
- return nil, err
- } else if chk != c.checksum {
- log.Tracef("cacheObject: %q invalidated, checksum mismatch: %q -> %q", c.object.GetName(), c.checksum, chk)
- log.Tracef("cacheObject: API load triggered for %q", c.object.GetName())
- obj, err := c.storage.GetMeta(c.object.GroupVersionKind(), c.object.GetUID())
- if err != nil {
- return nil, err
- }
-
- // Only apply the change after a successful GetMeta
- c.object = obj
- c.checksum = chk
- c.apiType = true
- } else {
- log.Tracef("cacheObject: %q checksum: %q", c.object.GetName(), c.checksum)
- }
-
- if c.apiType {
- return c.object, nil
- }
-
- return runtime.PartialObjectFrom(c.object), nil
-}
-*/
diff --git a/pkg/storage/client/client.go b/pkg/storage/client/client.go
new file mode 100644
index 00000000..e2488e2e
--- /dev/null
+++ b/pkg/storage/client/client.go
@@ -0,0 +1,286 @@
+package client
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/weaveworks/libgitops/pkg/filter"
+ "github.com/weaveworks/libgitops/pkg/serializer"
+ "github.com/weaveworks/libgitops/pkg/storage/backend"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ patchutil "github.com/weaveworks/libgitops/pkg/util/patch"
+ syncutil "github.com/weaveworks/libgitops/pkg/util/sync"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ kruntime "k8s.io/apimachinery/pkg/runtime"
+ utilerrs "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// TODO: Pass an ObjectID that contains all PartialObjectMetadata info for "downstream" consumers
+// that can make use of it by "casting up".
+
+// NewGeneric constructs a new Generic client
+// TODO: Construct the default patcher from the given scheme, make patcher an opt instead
+func NewGeneric(backend backend.Backend) (*Generic, error) {
+ if backend == nil {
+ return nil, fmt.Errorf("backend is mandatory")
+ }
+ return &Generic{backend, serializer.NewPatcher(backend.Encoder(), backend.Decoder())}, nil
+}
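+
+// Hedged usage sketch (myBackend and myGVK are placeholders, not part of this
+// change):
+//
+//   c, err := NewGeneric(myBackend)
+//   if err != nil { /* handle */ }
+//   obj := &metav1.PartialObjectMetadata{}
+//   obj.SetGroupVersionKind(myGVK) // required for partial/unstructured objects
+//   err = c.Get(ctx, core.ObjectKey{Name: "example", Namespace: "default"}, obj)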
+
+// Generic implements the Client interface
+type Generic struct {
+ backend backend.Backend
+ patcher serializer.Patcher
+}
+
+var _ Client = &Generic{}
+
+func (c *Generic) Backend() backend.Backend { return c.backend }
+func (c *Generic) BackendReader() backend.Reader { return c.backend }
+func (c *Generic) BackendWriter() backend.Writer { return c.backend }
+
+// Get retrieves the object for the given key from the backend, decoding the stored state into obj.
+// In order to only extract the metadata of this object, pass in a *metav1.PartialObjectMetadata.
+func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj Object) error {
+ obj.SetName(key.Name)
+ obj.SetNamespace(key.Namespace)
+
+ return c.backend.Get(ctx, obj)
+}
+
+// List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package
+// for more information, e.g. filter.NameFilter{} and filter.UIDFilter{})
+// You can also pass in an *unstructured.UnstructuredList to get an unknown type's data or
+// *metav1.PartialObjectMetadataList to just get the metadata of all objects of the specified gvk.
+// If you do specify either an *unstructured.UnstructuredList or *metav1.PartialObjectMetadataList,
+// you need to populate TypeMeta with the GVK you want back.
+// TODO: Check if this works with metav1.List{}
+// TODO: Create constructors for the different kinds of lists?
+func (c *Generic) List(ctx context.Context, list ObjectList, opts ...ListOption) error {
+ // This call will verify that list actually is a List type.
+ gvk, err := serializer.GVKForList(list, c.Scheme())
+ if err != nil {
+ return err
+ }
+ // This applies both upstream and custom options
+ listOpts := (&ExtendedListOptions{}).ApplyOptions(opts)
+
+ // Get namespacing info
+ gk := gvk.GroupKind()
+ namespaced, err := c.Backend().Storage().Namespacer().IsNamespaced(gk)
+ if err != nil {
+ return err
+ }
+
+ // By default, only search the given namespace. It is fully valid for this to be an
+ // empty string: that is the only valid value for root-spaced objects.
+ namespaces := sets.NewString(listOpts.Namespace)
+ // However, if the GroupKind is namespaced, and the given "filter namespace" in list
+ // options is empty, it means that one should list all namespaces
+ if namespaced && listOpts.Namespace == "" {
+ namespaces, err = c.Backend().ListNamespaces(ctx, gk)
+ if err != nil {
+ return err
+ }
+ } else if !namespaced && listOpts.Namespace != "" {
+ return errors.New("invalid namespace option: cannot filter namespace for root-spaced object")
+ }
+
+ allIDs := core.NewUnversionedObjectIDSet()
+ for ns := range namespaces {
+ ids, err := c.Backend().ListObjectIDs(ctx, gk, ns)
+ if err != nil {
+ return err
+ }
+ allIDs.InsertSet(ids)
+ }
+
+ // Populate objs through the given (non-buffered) channel
+ ch := make(chan Object)
+ objs := make([]kruntime.Object, 0, allIDs.Len())
+
+ // How should the object be created?
+ createFunc := createObject(gvk, c.Scheme())
+ if serializer.IsPartialObjectList(list) {
+ createFunc = createPartialObject(gvk)
+ } else if serializer.IsUnstructuredList(list) {
+ createFunc = createUnstructuredObject(gvk)
+ }
+ // Temporary processing goroutine; execution starts instantly
+ m := syncutil.RunMonitor(func() error {
+ return c.processKeys(ctx, allIDs, &listOpts.FilterOptions, createFunc, ch)
+ })
+
+ for o := range ch {
+ objs = append(objs, o)
+ }
+
+ if err := m.Wait(); err != nil {
+ return err
+ }
+
+ // Populate the List's Items field with the objects returned
+ return meta.SetList(list, objs)
+}
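+
+// Illustrative sketch of the metadata-only path described above (myGVK is a
+// placeholder GroupVersionKind):
+//
+//   list := &metav1.PartialObjectMetadataList{}
+//   list.SetGroupVersionKind(myGVK) // TypeMeta must be populated for partial lists
+//   err := c.List(ctx, list) // list.Items now holds metadata-only entries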
+
+func (c *Generic) Create(ctx context.Context, obj Object, _ ...CreateOption) error {
+ return c.backend.Create(ctx, obj)
+}
+
+func (c *Generic) Update(ctx context.Context, obj Object, _ ...UpdateOption) error {
+ return c.backend.Update(ctx, obj)
+}
+
+// Patch performs a strategic merge patch on the object with the given UID, using the byte-encoded patch given
+func (c *Generic) Patch(ctx context.Context, obj Object, patch Patch, _ ...PatchOption) error {
+ // Fail-fast: We must never save metadata-only structs
+ if serializer.IsPartialObject(obj) {
+ return backend.ErrCannotSaveMetadata
+ }
+
+ // Acquire the patch data from the "desired state" object given now, i.e. in MergeFrom{}
+ // TODO: Shall we require GVK to be present here using a meta interpreter?
+ patchJSON, err := patch.Data(obj)
+ if err != nil {
+ return err
+ }
+
+ // Load the current latest state into obj temporarily, before patching it
+ // This also validates the GVK, name and namespace.
+ if err := c.backend.Get(ctx, obj); err != nil {
+ return err
+ }
+
+ // Get the right BytePatcher for this patch type
+ // TODO: Make this return an error
+ bytePatcher := patchutil.BytePatcherForType(patch.Type())
+ if bytePatcher == nil {
+ return fmt.Errorf("patch type not supported: %s", patch.Type())
+ }
+
+ // Apply the patch into the object using the given byte patcher
+ if unstruct, ok := obj.(kruntime.Unstructured); ok {
+ // TODO: Provide an option for the schema
+ err = c.patcher.ApplyOnUnstructured(bytePatcher, patchJSON, unstruct, nil)
+ } else {
+ err = c.patcher.ApplyOnStruct(bytePatcher, patchJSON, obj)
+ }
+ if err != nil {
+ return err
+ }
+
+ // Perform an update internally, similar to what .Update would yield
+ // TODO: Maybe write to the Storage conditionally? using DryRun all
+ return c.Update(ctx, obj)
+}
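+
+// A hedged caller-side sketch (crclient refers to controller-runtime's
+// sigs.k8s.io/controller-runtime/pkg/client package; the type assertion
+// assumes a concrete typed object):
+//
+//   base := obj.DeepCopyObject().(Object) // snapshot of the current desired state
+//   obj.SetLabels(map[string]string{"env": "prod"}) // mutate towards the new desired state
+//   err := c.Patch(ctx, obj, crclient.MergeFrom(base))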
+
+// Delete removes an Object from the backend
+// PartialObjectMetadata should work here.
+func (c *Generic) Delete(ctx context.Context, obj Object, _ ...DeleteOption) error {
+ return c.backend.Delete(ctx, obj)
+}
+
+// DeleteAllOf deletes all matched resources by first doing a List() operation on the given GVK of
+// obj (obj is not used for anything else) and the given filters in opts. Only the
+// GroupVersionKind of obj is used, so a metadata-only object works here too.
+func (c *Generic) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+ // This applies both upstream and custom options, and propagates the options correctly to both
+ // List() and Delete()
+ customDeleteAllOpts := (&ExtendedDeleteAllOfOptions{}).ApplyOptions(opts)
+
+ // Get the GVK of the object
+ gvk, err := serializer.GVKForObject(c.Scheme(), obj)
+ if err != nil {
+ return err
+ }
+
+ // List all matched objects for the given ListOptions, and GVK.
+ // UnstructuredList is used here so that we can use filters that operate on fields
+ list := &unstructured.UnstructuredList{}
+ list.SetGroupVersionKind(gvk)
+ if err := c.List(ctx, list, customDeleteAllOpts); err != nil {
+ return err
+ }
+
+ // Loop through all of the matched items, and Delete them one-by-one
+ for i := range list.Items {
+ if err := c.Delete(ctx, &list.Items[i], customDeleteAllOpts); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Scheme returns the scheme this client is using.
+func (c *Generic) Scheme() *kruntime.Scheme {
+ return c.Backend().Encoder().GetLockedScheme().Scheme()
+}
+
+// RESTMapper returns the RESTMapper this client is using. For now, this returns nil, so don't use it.
+func (c *Generic) RESTMapper() meta.RESTMapper {
+ return nil
+}
+
+type newObjectFunc func() (Object, error)
+
+func createObject(gvk core.GroupVersionKind, scheme *kruntime.Scheme) newObjectFunc {
+ return func() (Object, error) {
+ return NewObjectForGVK(gvk, scheme)
+ }
+}
+
+func createPartialObject(gvk core.GroupVersionKind) newObjectFunc {
+ return func() (Object, error) {
+ obj := &metav1.PartialObjectMetadata{}
+ obj.SetGroupVersionKind(gvk)
+ return obj, nil
+ }
+}
+
+func createUnstructuredObject(gvk core.GroupVersionKind) newObjectFunc {
+ return func() (Object, error) {
+ obj := &unstructured.Unstructured{}
+ obj.SetGroupVersionKind(gvk)
+ return obj, nil
+ }
+}
+
+func (c *Generic) processKeys(ctx context.Context, ids core.UnversionedObjectIDSet, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan Object) error {
+ goroutines := []func() error{}
+ _ = ids.ForEach(func(id core.UnversionedObjectID) error {
+ goroutines = append(goroutines, c.processKey(ctx, id, filterOpts, fn, output))
+ return nil
+ })
+
+ defer close(output)
+
+ return utilerrs.AggregateGoroutines(goroutines...)
+}
+
+func (c *Generic) processKey(ctx context.Context, id core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan Object) func() error {
+ return func() error {
+ // Create a new object, and decode into it using Get
+ obj, err := fn()
+ if err != nil {
+ return err
+ }
+
+ if err := c.Get(ctx, id.ObjectKey(), obj); err != nil {
+ return err
+ }
+
+ // Match the object against the filters
+ matched, err := filterOpts.Match(obj)
+ if err != nil {
+ return err
+ }
+ if matched {
+ output <- obj
+ }
+
+ return nil
+ }
+}
diff --git a/pkg/storage/client/interfaces.go b/pkg/storage/client/interfaces.go
new file mode 100644
index 00000000..63f5cb79
--- /dev/null
+++ b/pkg/storage/client/interfaces.go
@@ -0,0 +1,57 @@
+package client
+
+import (
+ "errors"
+
+ "github.com/weaveworks/libgitops/pkg/storage/backend"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Client-related Object aliases
+type Object = client.Object
+type ObjectList = client.ObjectList
+type Patch = client.Patch
+
+// Client-related Option aliases
+type ListOption = client.ListOption
+type CreateOption = client.CreateOption
+type UpdateOption = client.UpdateOption
+type PatchOption = client.PatchOption
+type DeleteOption = client.DeleteOption
+type DeleteAllOfOption = client.DeleteAllOfOption
+
+var (
+ // ErrUnsupportedPatchType is returned when an unsupported patch type is used
+ ErrUnsupportedPatchType = errors.New("unsupported patch type")
+)
+
+type Reader interface {
+ client.Reader
+ BackendReader() backend.Reader
+}
+
+type EventReader interface {
+ Reader
+ // If ctx points to a tag, then only tag updates are followed.
+ // If ctx points to a branch, then updates to that branch are included.
+ client.WithWatch
+}
+
+type Writer interface {
+ client.Writer
+ BackendWriter() backend.Writer
+}
+
+type StatusClient interface {
+ client.StatusClient
+ BackendStatusWriter() backend.StatusWriter
+}
+
+// Client is an interface for persisting and retrieving API objects to/from a backend
+// One Client instance handles all different Kinds of Objects
+type Client interface {
+ Reader
+ Writer
+ // TODO: StatusClient
+ //client.Client
+}
diff --git a/pkg/storage/client/options.go b/pkg/storage/client/options.go
new file mode 100644
index 00000000..08ec9f0a
--- /dev/null
+++ b/pkg/storage/client/options.go
@@ -0,0 +1,75 @@
+package client
+
+import (
+ "github.com/weaveworks/libgitops/pkg/filter"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type ExtendedListOption interface {
+ client.ListOption
+ filter.FilterOption
+}
+
+type ExtendedListOptions struct {
+ client.ListOptions
+ filter.FilterOptions
+}
+
+var _ ExtendedListOption = &ExtendedListOptions{}
+
+func (o *ExtendedListOptions) ApplyToList(target *client.ListOptions) {
+ o.ListOptions.ApplyToList(target)
+}
+
+func (o *ExtendedListOptions) ApplyToFilterOptions(target *filter.FilterOptions) {
+ o.FilterOptions.ApplyToFilterOptions(target)
+}
+
+func (o *ExtendedListOptions) ApplyOptions(opts []client.ListOption) *ExtendedListOptions {
+ // Apply the "normal" ListOptions
+ o.ListOptions.ApplyOptions(opts)
+ // Apply all FilterOptions, if they implement that interface
+ for _, opt := range opts {
+ o.FilterOptions.ApplyOption(opt)
+ }
+
+ // If listOpts.Namespace was given, add it to the list of ObjectFilters
+ if len(o.Namespace) != 0 {
+ o.ObjectFilters = append(o.ObjectFilters, filter.NamespaceFilter{Namespace: o.Namespace})
+ }
+ // If listOpts.LabelSelector was given, add it to the list of ObjectFilters
+ if o.LabelSelector != nil {
+ o.ObjectFilters = append(o.ObjectFilters, filter.LabelsFilter{LabelSelector: o.LabelSelector})
+ }
+
+ return o
+}
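+
+// Sketch (illustrative): upstream list options are folded into ObjectFilters.
+//
+//   opts := (&ExtendedListOptions{}).ApplyOptions([]client.ListOption{
+//       client.InNamespace("default"),
+//   })
+//   // opts.ObjectFilters now includes filter.NamespaceFilter{Namespace: "default"}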
+
+type ExtendedDeleteAllOfOption interface {
+ ExtendedListOption
+ client.DeleteAllOfOption
+}
+
+type ExtendedDeleteAllOfOptions struct {
+ ExtendedListOptions
+ client.DeleteOptions
+}
+
+var _ ExtendedDeleteAllOfOption = &ExtendedDeleteAllOfOptions{}
+
+func (o *ExtendedDeleteAllOfOptions) ApplyToDeleteAllOf(target *client.DeleteAllOfOptions) {
+ o.DeleteOptions.ApplyToDelete(&target.DeleteOptions)
+}
+
+func (o *ExtendedDeleteAllOfOptions) ApplyOptions(opts []client.DeleteAllOfOption) *ExtendedDeleteAllOfOptions {
+ // Cannot directly apply to o, hence, create a temporary object to which upstream opts are applied
+ do := (&client.DeleteAllOfOptions{}).ApplyOptions(opts)
+ o.ExtendedListOptions.ListOptions = do.ListOptions
+ o.DeleteOptions = do.DeleteOptions
+
+ // Apply all FilterOptions, if they implement that interface
+ for _, opt := range opts {
+ o.FilterOptions.ApplyOption(opt)
+ }
+ return o
+}
diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go
new file mode 100644
index 00000000..ddc78f5a
--- /dev/null
+++ b/pkg/storage/client/transactional/client.go
@@ -0,0 +1,291 @@
+package transactional
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/go-logr/logr"
+ "github.com/weaveworks/libgitops/pkg/storage/backend"
+ "github.com/weaveworks/libgitops/pkg/storage/client"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "go.uber.org/atomic"
+ "k8s.io/apimachinery/pkg/types"
+ utilerrs "k8s.io/apimachinery/pkg/util/errors"
+)
+
+var _ Client = &genericWithRef{}
+
+func NewGeneric(c client.Client, manager TransactionManager) (Client, error) {
+ if c == nil {
+ return nil, fmt.Errorf("%w: c is required", core.ErrInvalidParameter)
+ }
+ if manager == nil {
+ return nil, fmt.Errorf("%w: manager is required", core.ErrInvalidParameter)
+ }
+ g := &generic{
+ c: c,
+ //lockMap: syncutil.NewNamedLockMap(),
+ txHooks: &MultiTransactionHook{},
+ commitHooks: &MultiCommitHook{},
+ manager: manager,
+ txs: make(map[types.UID]*atomic.Bool),
+ txsMu: &sync.Mutex{},
+ }
+ return &genericWithRef{g, nil, commit.Default()}, nil
+}
+
+type generic struct {
+ c client.Client
+
+ //lockMap syncutil.NamedLockMap
+
+ // Hooks
+ txHooks TransactionHookChain
+ commitHooks CommitHookChain
+
+ // +required
+ manager TransactionManager
+
+ txs map[types.UID]*atomic.Bool
+ txsMu *sync.Mutex
+}
+
+type genericWithRef struct {
+ *generic
+ hash commit.Hash
+ ref commit.Ref
+}
+
+func (c *genericWithRef) AtHash(h commit.Hash) Client {
+ return &genericWithRef{generic: c.generic, hash: h, ref: c.ref}
+}
+func (c *genericWithRef) AtRef(symbolic commit.Ref) Client {
+ // TODO: Invalid (programmer error) to pass symbolic == nil
+ return &genericWithRef{generic: c.generic, hash: c.hash, ref: symbolic}
+}
+func (c *genericWithRef) CurrentRef() commit.Ref {
+ return c.ref
+}
+func (c *genericWithRef) CurrentHash() (commit.Hash, error) {
+ // Use the fixed hash if set
+ if c.hash != nil {
+ return c.hash, nil
+ }
+ // Otherwise, lookup the symbolic
+ return c.ref.Resolve(c.manager.RefResolver())
+}
+
+func (c *genericWithRef) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error {
+ return c.defaultCtxCommitRef(ctx, func(ctx context.Context) error {
+ return c.c.Get(ctx, key, obj)
+ })
+}
+
+func (c *genericWithRef) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ return c.defaultCtxCommitRef(ctx, func(ctx context.Context) error {
+ return c.c.List(ctx, list, opts...)
+ })
+}
+
+// defaultCtxCommitRef makes sure that there's a commit.Hash registered with the context when reading.
+// TODO: In the future, shall filesystems also support commit.Ref?
+func (c *genericWithRef) defaultCtxCommitRef(ctx context.Context, callback func(ctx context.Context) error) error {
+ // If ctx already specifies an immutable version to read, use it
+ if _, ok := commit.GetHash(ctx); ok {
+ return callback(ctx)
+ }
+ // If ctx specifies a symbolic target, resolve it
+ if ref, ok := commit.GetRef(ctx); ok {
+ h, err := ref.Resolve(c.manager.RefResolver())
+ if err != nil {
+ return err
+ }
+ return callback(commit.WithHash(ctx, h))
+ }
+
+ // Otherwise, look it up based on this client's data
+ h, err := c.CurrentHash()
+ if err != nil {
+ return err
+ }
+
+ // TODO: At what point should we resolve the "branch" -> "commit" part? Should we expect that to be done in the
+ // filesystem only?
+ return callback(commit.WithHash(ctx, h))
+}
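+
+// Read-resolution precedence, as a sketch: an explicit hash in the context
+// wins, then a symbolic ref in the context, then this client's own ref:
+//
+//	ctx = commit.WithHash(ctx, h)                      // read exactly commit h
+//	ctx = commit.WithRef(ctx, commit.AtBranch("main")) // re-resolved on each read
+//	// With neither set, c.CurrentHash() decides.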
+
+func (c *genericWithRef) txStateByUID(uid types.UID) *atomic.Bool {
+ // c.txsMu guards reads and writes of the c.txs map
+ c.txsMu.Lock()
+ defer c.txsMu.Unlock()
+
+	// Check if state for a transaction with this UID already exists.
+ state, ok := c.txs[uid]
+ if ok {
+ return state
+ }
+ // if not, grow the txs map by one and return it
+ c.txs[uid] = atomic.NewBool(false)
+ return c.txs[uid]
+}
+func (c *genericWithRef) initTx(ctx context.Context, info TxInfo) (context.Context, txFunc, error) {
+ log := logr.FromContextOrDiscard(ctx)
+
+ active := c.txStateByUID(info.Target.UUID())
+	// If active == false, then this will switch active => true and return true.
+	// If active == true, then no operation will take place, and false is returned.
+	// In other words, if false is returned, a transaction with this UID is ongoing.
+	// However, a UID conflict is very unlikely, given the randomness and length of the UID.
+ if !active.CAS(false, true) {
+ // TODO: Avoid this possibility
+ return nil, nil, errors.New("should never happen; UID conflict")
+ }
+
+ // Create a child context with a timeout
+ dlCtx, cleanupTimeout := context.WithTimeout(ctx, info.Options.Timeout)
+
+	// This function cleans up the transaction, and unlocks the tx mutex
+ cleanupFunc := func() error {
+ // Cleanup after the transaction
+ if err := c.cleanupAfterTx(ctx, &info); err != nil {
+			return fmt.Errorf("failed to clean up branch %s after tx: %v", info.Target.DestBranch(), err)
+ }
+ // Avoid leaking memory by growing c.txs infinitely
+ c.txsMu.Lock()
+ delete(c.txs, info.Target.UUID())
+ c.txsMu.Unlock()
+ return nil
+ }
+
+ // Start waiting for the cancellation of the deadline context.
+ go func() {
+ // Wait for the context to either timeout or be cancelled
+ <-dlCtx.Done()
+ // This guard makes sure the cleanup function runs exactly
+ // once, regardless of transaction end cause.
+ if active.CAS(true, false) {
+ if err := cleanupFunc(); err != nil {
+ log.Error(err, "failed to cleanup after tx timeout")
+ }
+ }
+ }()
+
+ abortFunc := func() error {
+ // The transaction ended; the caller is either Abort() or
+ // at the end of a successful transaction. The cause of
+ // Abort() happening can also be a context cancellation.
+ // If the parent context was cancelled or timed out; this
+ // function and the above function race to set active => 0
+ // Regardless, due to the atomic nature of the operation,
+ // cleanupFunc() will only be run once.
+ if active.CAS(true, false) {
+ // We can now stop the timeout timer
+ cleanupTimeout()
+ // Clean up the transaction
+ return cleanupFunc()
+ }
+ return nil
+ }
+
+ return dlCtx, abortFunc, nil
+}
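+
+// The run-exactly-once cleanup rests on the atomic flag: the timeout
+// goroutine and abortFunc race through the same CAS gate (sketch):
+//
+//	if active.CAS(true, false) { // only one caller wins
+//		_ = cleanupFunc()
+//	}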
+
+func (c *genericWithRef) cleanupAfterTx(ctx context.Context, info *TxInfo) error {
+ // Always both clean the writable area, and run post-tx tasks
+ return utilerrs.NewAggregate([]error{
+ c.manager.Abort(ctx, info),
+ // TODO: should this be in its own goroutine to switch back to main
+ // ASAP?
+ c.TransactionHookChain().PostTransactionHook(ctx, *info),
+ })
+}
+
+func (c *genericWithRef) BackendReader() backend.Reader {
+ return c.c.BackendReader()
+}
+
+func (c *genericWithRef) TransactionManager() TransactionManager {
+ return c.manager
+}
+
+func (c *genericWithRef) TransactionHookChain() TransactionHookChain {
+ return c.txHooks
+}
+
+func (c *genericWithRef) CommitHookChain() CommitHookChain {
+ return c.commitHooks
+}
+
+func (c *genericWithRef) Transaction(ctx context.Context, headBranch string, opts ...TxOption) Tx {
+ tx, err := c.transaction(ctx, headBranch, opts...)
+ if err != nil {
+ // TODO: Return a Tx with an error included
+ panic(err)
+ }
+ return tx
+}
+
+var ErrVersionRefIsImmutable = errors.New("cannot execute transaction against immutable version ref")
+
+func (c *genericWithRef) transaction(ctx context.Context, headBranch string, opts ...TxOption) (Tx, error) {
+ log := logr.FromContextOrDiscard(ctx)
+
+ // Get the immutable base version hash
+ baseHash, err := c.ref.Resolve(c.manager.RefResolver())
+ if err != nil {
+ return nil, err
+ }
+
+ // Append random bytes to the end of the head branch if it ends with a dash
+ if strings.HasSuffix(headBranch, "-") {
+ suffix, err := randomSHA(4)
+ if err != nil {
+ return nil, err
+ }
+ headBranch += suffix
+ }
+
+	log.V(2).Info("resolved transaction base", "base-hash", baseHash, "head-branch", headBranch)
+
+ // Parse options
+ o := defaultTxOptions().ApplyOptions(opts)
+
+ target := commit.NewMutableTarget(headBranch, baseHash)
+ info := TxInfo{
+ Target: target,
+ Options: *o,
+ }
+
+ // Register the head branch with the context
+ // TODO: We should register all of TxInfo here instead, or ...?
+ ctxWithDestBranch := commit.WithMutableTarget(ctx, target)
+ // Initialize the transaction
+ ctxWithDeadline, cleanupFunc, err := c.initTx(ctxWithDestBranch, info)
+ if err != nil {
+ return nil, err
+ }
+
+ // Run pre-tx checks and create the new branch
+ // TODO: Use uber's multierr?
+ if err := utilerrs.NewAggregate([]error{
+ c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info),
+ c.manager.Init(ctxWithDeadline, &info),
+ }); err != nil {
+ return nil, err
+ }
+
+ return &txImpl{
+ txCommon: &txCommon{
+ c: c.c,
+ manager: c.manager,
+ commitHook: c.CommitHookChain(),
+ ctx: ctxWithDeadline,
+ info: info,
+ cleanupFunc: cleanupFunc,
+ },
+ }, nil
+}
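+
+// A usage sketch (names illustrative): a trailing dash in headBranch requests
+// a random suffix, so concurrent transactions don't collide on branch names:
+//
+//	tx := c.Transaction(ctx, "update-car-" /*, opts... */)
+//	// headBranch becomes e.g. "update-car-1a2b3c4d" (from randomSHA(4)).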
diff --git a/pkg/storage/client/transactional/commit.go b/pkg/storage/client/transactional/commit.go
new file mode 100644
index 00000000..a46cbccb
--- /dev/null
+++ b/pkg/storage/client/transactional/commit.go
@@ -0,0 +1,122 @@
+package transactional
+
+/*
+// Commit describes a result of a transaction.
+type Commit interface {
+ // GetAuthor describes the author of this commit.
+ // +required
+ GetAuthor() CommitAuthor
+ // GetMessage describes the change in this commit.
+ // +required
+ GetMessage() CommitMessage
+ // Validate validates that all required fields are set, and given data is valid.
+ Validate() error
+}
+
+type CommitAuthor interface {
+ // GetName describes the author's name (e.g. as per git config)
+ // +required
+ GetName() string
+ // GetEmail describes the author's email (e.g. as per git config).
+ // It is optional generally, but might be required by some specific
+ // implementations.
+ // +optional
+ GetEmail() string
+ // The String() method must return a (ideally both human- and machine-
+ // readable) concatenated string including the name and email (if
+ // applicable) of the author.
+ fmt.Stringer
+}
+
+type CommitMessage interface {
+ // GetTitle describes the change concisely, so it can be used e.g. as
+ // a commit message or PR title. Certain implementations might enforce
+ // character limits on this string.
+ // +required
+ GetTitle() string
+ // GetDescription contains optional extra, more detailed information
+ // about the change.
+ // +optional
+ GetDescription() string
+ // The String() method must return a (ideally both human- and machine-
+ // readable) concatenated string including the title and description
+	// (if applicable) of the message.
+ fmt.Stringer
+}
+
+// GenericCommit implements Commit.
+var _ Commit = GenericCommit{}
+
+// GenericCommit implements Commit.
+type GenericCommit struct {
+ // GetAuthor describes the author of this commit.
+ // +required
+ Author CommitAuthor
+ // GetMessage describes the change in this commit.
+ // +required
+ Message CommitMessage
+}
+
+func (r GenericCommit) GetAuthor() CommitAuthor { return r.Author }
+func (r GenericCommit) GetMessage() CommitMessage { return r.Message }
+
+func (r GenericCommit) Validate() error {
+ v := validation.New("GenericCommit")
+ if len(r.Author.GetName()) == 0 {
+ v.Required("Author.GetName")
+ }
+ if len(r.Message.GetTitle()) == 0 {
+ v.Required("Message.GetTitle")
+ }
+ return v.Error()
+}
+
+// GenericCommitAuthor implements CommitAuthor.
+var _ CommitAuthor = GenericCommitAuthor{}
+
+// GenericCommitAuthor implements CommitAuthor.
+type GenericCommitAuthor struct {
+ // Name describes the author's name (as per git config)
+ // +required
+ Name string
+ // Email describes the author's email (as per git config)
+ // +optional
+ Email string
+}
+
+func (r GenericCommitAuthor) GetName() string { return r.Name }
+func (r GenericCommitAuthor) GetEmail() string { return r.Email }
+
+func (r GenericCommitAuthor) String() string {
+ if len(r.Email) != 0 {
+ return fmt.Sprintf("%s <%s>", r.Name, r.Email)
+ }
+ return r.Name
+}
+
+// GenericCommitMessage implements CommitMessage.
+var _ CommitMessage = GenericCommitMessage{}
+
+// GenericCommitMessage implements CommitMessage.
+type GenericCommitMessage struct {
+ // Title describes the change concisely, so it can be used e.g. as
+ // a commit message or PR title. Certain implementations might enforce
+ // character limits on this string.
+ // +required
+ Title string
+ // Description contains optional extra, more detailed information
+ // about the change.
+ // +optional
+ Description string
+}
+
+func (r GenericCommitMessage) GetTitle() string { return r.Title }
+func (r GenericCommitMessage) GetDescription() string { return r.Description }
+
+func (r GenericCommitMessage) String() string {
+ if len(r.Description) != 0 {
+ return fmt.Sprintf("%s\n\n%s", r.Title, r.Description)
+ }
+ return r.Title
+}
+*/
diff --git a/pkg/storage/client/transactional/distributed/client.go b/pkg/storage/client/transactional/distributed/client.go
new file mode 100644
index 00000000..230e4958
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/client.go
@@ -0,0 +1,377 @@
+package distributed
+
+import (
+ "context"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/go-logr/logr"
+ "github.com/weaveworks/libgitops/pkg/storage/client"
+ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "k8s.io/apimachinery/pkg/util/wait"
+)
+
+// NewClient creates a new distributed Client using the given underlying transactional Client,
+// remote, and options that configure how the Client should respond to network partitions.
+func NewClient(c transactional.Client, remote Remote, opts ...ClientOption) (Client, error) {
+ if c == nil {
+ return nil, fmt.Errorf("%w: c is mandatory", core.ErrInvalidParameter)
+ }
+ if remote == nil {
+ return nil, fmt.Errorf("%w: remote is mandatory", core.ErrInvalidParameter)
+ }
+
+ o := defaultOptions().ApplyOptions(opts)
+
+ g := &generic{
+ GenericClient: c,
+ remote: remote,
+ opts: *o,
+ branchLocks: make(map[string]*branchLock),
+ branchLocksMu: &sync.Mutex{},
+ }
+
+ // Construct the default client
+ dc := &genericWithRef{g, nil, commit.Default()}
+
+ // Register ourselves to hook into the transactional.Client's operations
+ c.CommitHookChain().Register(dc)
+ c.TransactionHookChain().Register(dc)
+
+ return dc, nil
+}
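+
+// Wiring sketch (assumes a transactional.Client `txc` and a Remote such as
+// *git.Git from the git subpackage):
+//
+//	dc, err := NewClient(txc, remote /*, opts... */)
+//	if err != nil {
+//		return err
+//	}
+//	// dc now pulls before stale reference-based reads, and pushes after
+//	// commits, via the hooks registered above.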
+
+type generic struct {
+ transactional.GenericClient
+ remote Remote
+ opts ClientOptions
+	// branchLocks maps a given branch to a lock guarding the state of that branch
+ branchLocks map[string]*branchLock
+ // branchLocksMu guards branchLocks
+ branchLocksMu *sync.Mutex
+}
+
+type genericWithRef struct {
+ *generic
+ hash commit.Hash
+ ref commit.Ref
+}
+
+func (c *genericWithRef) AtHash(h commit.Hash) Client {
+ return &genericWithRef{generic: c.generic, hash: h, ref: c.ref}
+}
+func (c *genericWithRef) AtRef(symbolic commit.Ref) Client {
+ // TODO: Invalid (programmer error) to pass symbolic == nil
+ return &genericWithRef{generic: c.generic, hash: c.hash, ref: symbolic}
+}
+func (c *genericWithRef) CurrentRef() commit.Ref {
+ return c.ref
+}
+func (c *genericWithRef) CurrentHash() (commit.Hash, error) {
+ // Use the fixed hash if set
+ if c.hash != nil {
+ return c.hash, nil
+ }
+ // Otherwise, lookup the symbolic
+ return c.ref.Resolve(c.TransactionManager().RefResolver())
+}
+
+type branchLockKeyImpl struct{}
+
+var branchLockKey = branchLockKeyImpl{}
+
+type branchLock struct {
+ // mu should be write-locked whenever the branch is actively running any
+ // function from the remote
+ mu *sync.RWMutex
+	// lastPull is guarded by mu; RLock mu before reading it
+ lastPull time.Time
+}
+
+/*
+	TODO: TxMode.AllowReads is incompatible with the PC/EC distributed mode, and might be with the PC/EL mode.
+	Consider removing the AllowReads mode entirely: a caller that wants to read a specific version while a
+	tx is ongoing on a branch just needs to specify the direct commit.
+*/
+
+func (c *genericWithRef) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error {
+ return c.readWhenPossible(ctx, func(ctx context.Context) error {
+ return c.GenericClient.Get(ctx, key, obj)
+ })
+}
+
+func (c *genericWithRef) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
+ return c.readWhenPossible(ctx, func(ctx context.Context) error {
+ return c.GenericClient.List(ctx, list, opts...)
+ })
+}
+
+func (c *genericWithRef) readWhenPossible(ctx context.Context, operation func(context.Context) error) error {
+ // If the read is immutable, just proceed
+ if _, ok := commit.GetHash(ctx); ok {
+ return operation(ctx)
+ }
+ if c.hash != nil {
+ return operation(commit.WithHash(ctx, c.hash))
+ }
+
+ // Use the ref from the context, if set, otherwise default to the one configured
+ // in this Client.
+ ref, ok := commit.GetRef(ctx)
+ if !ok {
+ ref = c.ref
+ }
+
+ // If the read is reference-based; look it up if it needs resync first
+ if c.needsResync(ref) {
+ // Try to pull the remote ref. If it fails, use returnErr to figure out if
+ // this (depending on the configured PACELC mode) is a critical error, or if we
+ // should continue with the read
+ if err := c.pull(ctx, ref); err != nil {
+ if criticalErr := c.returnErr(err); criticalErr != nil {
+ return criticalErr
+ }
+ }
+ }
+ // Do the read operation
+ return operation(commit.WithRef(ctx, ref))
+}
+
+// refToStr makes a string representation of the ref that uniquely determines
+// whether two refs are "similar" (i.e. touch the same resource to be pulled).
+func refToStr(ref commit.Ref) string {
+ return fmt.Sprintf("%s-%s", ref.Type(), ref.Target())
+}
+
+func (c *genericWithRef) getBranchLockInfo(ref commit.Ref) *branchLock {
+ c.branchLocksMu.Lock()
+ defer c.branchLocksMu.Unlock()
+
+ // Check if there exists a lock for that ref
+ str := refToStr(ref)
+ info, ok := c.branchLocks[str]
+ if ok {
+ return info
+ }
+ // Write to the branchLocks map
+ c.branchLocks[str] = &branchLock{
+ mu: &sync.RWMutex{},
+ }
+ return c.branchLocks[str]
+}
+
+func (c *genericWithRef) needsResync(ref commit.Ref) bool {
+ // Always resync if the cache is always directly invalidated
+ cacheValid := c.opts.CacheValidDuration
+ if cacheValid == 0 {
+ return true
+ }
+
+ lck := c.getBranchLockInfo(ref)
+ // Lock while reading the last resync time
+ lck.mu.RLock()
+ defer lck.mu.RUnlock()
+ // Resync if there has been no sync so far, or if the last resync was too long ago
+ return lck.lastPull.IsZero() || time.Since(lck.lastPull) > cacheValid
+}
+
+// StartResyncLoop starts a resync loop for the given branches for
+// the given interval.
+//
+// resyncCacheInterval specifies the interval at which resyncs
+// (remote Pulls) should run in the background. The duration must
+// be positive.
+//
+// sync specifies which branches to resync. The default is
+// []commit.Ref{commit.Default()}, i.e. only the "default" branch.
+//
+// ctx should be used to cancel the loop, if needed.
+//
+// While it is technically possible to start many of these resync
+// loops, it is not recommended. Start it once, for all the branches
+// you need. The branches will be pulled synchronously in order. The
+// resync interval is non-sliding, which means that the interval
+// includes the time of the operations.
+func (c *genericWithRef) StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, sync ...commit.Ref) {
+ log := c.logger(ctx)
+ // Only start this loop if resyncCacheInterval > 0
+ if resyncCacheInterval <= 0 {
+ log.Info("No need to start the resync loop; resyncCacheInterval <= 0")
+ return
+ }
+ // If unset, only sync the default branch.
+ if sync == nil {
+ sync = []commit.Ref{commit.Default()}
+ }
+
+ // Start the resync goroutine
+ go c.resyncLoop(ctx, resyncCacheInterval, sync)
+}
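+
+// Usage sketch: resync the default branch and "main" every 30 seconds until
+// ctx is cancelled:
+//
+//	c.StartResyncLoop(ctx, 30*time.Second, commit.Default(), commit.AtBranch("main"))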
+
+func (c *genericWithRef) logger(ctx context.Context) logr.Logger {
+ return logr.FromContextOrDiscard(ctx).WithName("distributed.Client")
+}
+
+func (c *genericWithRef) resyncLoop(ctx context.Context, resyncCacheInterval time.Duration, sync []commit.Ref) {
+ log := c.logger(ctx).WithName("resyncLoop")
+ log.V(2).Info("starting resync loop")
+
+ wait.NonSlidingUntilWithContext(ctx, func(_ context.Context) {
+ for _, branch := range sync {
+			log.V(2).Info("resyncLoop: performing pull operation", "branch", branch)
+ // Perform a fetch, pull & checkout of the new revision
+ if err := c.pull(ctx, branch); err != nil {
+ log.Error(err, "remote pull failed")
+ return
+ }
+ }
+ }, resyncCacheInterval)
+ log.V(2).Info("context cancelled, exiting resync loop")
+}
+
+func (c *genericWithRef) pull(ctx context.Context, ref commit.Ref) error {
+ // Need to get the branch-specific lock variable
+ lck := c.getBranchLockInfo(ref)
+ // Write-lock while this operation is in progress
+ lck.mu.Lock()
+ defer lck.mu.Unlock()
+
+ // Create a new context that times out after the given duration
+ ctx, cancel := context.WithTimeout(ctx, c.opts.PullTimeout)
+ defer cancel()
+
+ // Make a ctx with the given ref
+ ctx = commit.WithRef(ctx, ref)
+ if err := c.remote.Pull(ctx); err != nil {
+ return err
+ }
+
+ // Register the timestamp into the lock
+ lck.lastPull = time.Now()
+ return nil
+}
+
+func (c *genericWithRef) PreTransactionHook(ctx context.Context, info transactional.TxInfo) error {
+ // We count on ctx having the VersionRef registered for the head branch
+
+ // Always Pull the _base_ branch before a transaction, to be up-to-date
+ // before creating the new head branch
+ ref := commit.AtBranch(info.Target.DestBranch())
+ if err := c.pull(ctx, ref); err != nil {
+ // TODO: Consider a wrapping closure here instead of having to remember to
+ // wrap the error in returnErr
+ return c.returnErr(err)
+ }
+
+ return nil
+}
+
+func (c *genericWithRef) PreCommitHook(context.Context, transactional.TxInfo, commit.Request) error {
+ return nil // nothing to do here
+}
+
+func (c *genericWithRef) PostCommitHook(ctx context.Context, info transactional.TxInfo, _ commit.Request) error {
+ // Push the branch in the ctx
+ ref := commit.AtBranch(info.Target.DestBranch())
+ if err := c.push(ctx, ref); err != nil {
+ return c.returnErr(err)
+ }
+ return nil
+}
+
+func (c *genericWithRef) PostTransactionHook(context.Context, transactional.TxInfo) error {
+ return nil // nothing to do here; if we had locking capability one would unlock
+}
+
+func (c *genericWithRef) Remote() Remote { return c.remote }
+
+func (c *genericWithRef) returnErr(err error) error {
+ // If RemoteErrorStream isn't defined, just pass the error through
+ if c.opts.RemoteErrorStream == nil {
+ return err
+ }
+ // Non-blocking send to the channel, and no return error
+ go func() {
+ c.opts.RemoteErrorStream <- err
+ }()
+ return nil
+}
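+
+// Sketch: with a RemoteErrorStream configured (via ClientOptions), remote
+// failures become observable instead of fatal, favoring availability:
+//
+//	errCh := make(chan error)
+//	go func() {
+//		for err := range errCh {
+//			log.Error(err, "background remote error")
+//		}
+//	}()
+//	// pass errCh as the RemoteErrorStream when constructing the Client.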
+
+func (c *genericWithRef) push(ctx context.Context, ref commit.Ref) error {
+ // Need to get the branch-specific lock variable
+ lck := c.getBranchLockInfo(ref)
+ // Write-lock while this operation is in progress
+ lck.mu.Lock()
+ defer lck.mu.Unlock()
+
+ // Create a new context that times out after the given duration
+ ctx, cancel := context.WithTimeout(ctx, c.opts.PushTimeout)
+ defer cancel()
+
+ // Push the head branch using the remote
+ // If the Push fails, don't execute any other later statements
+ return c.remote.Push(ctx)
+}
+
+/*
+
+func (c *genericWithRef) branchFromCtx(ctx context.Context) string {
+ return core.GetVersionRef(ctx).Branch()
+}
+
+// Lock the branch for writing, if supported by the remote
+ // If the lock fails, we DO NOT try to pull, but just exit (either with err or a nil error,
+ // depending on the configured PACELC mode)
+ // TODO: Can we rely on the timeout being exact enough here?
+ // TODO: How to do this before the branch even exists...?
+ if err := c.lock(ctx, info.Options.Timeout); err != nil {
+ return c.returnErr(err)
+ }
+
+// Unlock the head branch, if supported
+ if err := c.unlock(ctx); err != nil {
+ return c.returnErr(err)
+ }
+
+func (c *genericWithRef) lock(ctx context.Context, d time.Duration) error {
+ lr, ok := c.remote.(LockableRemote)
+ if !ok {
+ return nil
+ }
+
+ // Need to get the branch-specific lock variable
+ lck := c.getBranchLockInfo(c.branchFromCtx(ctx))
+ // Write-lock while this operation is in progress
+ lck.mu.Lock()
+ defer lck.mu.Unlock()
+
+ // Enforce a timeout
+ lockCtx, cancel := context.WithTimeout(ctx, c.opts.LockTimeout)
+ defer cancel()
+
+ return lr.Lock(lockCtx, d)
+}
+
+func (c *genericWithRef) unlock(ctx context.Context) error {
+ lr, ok := c.remote.(LockableRemote)
+ if !ok {
+ return nil
+ }
+
+ // Need to get the branch-specific lock variable
+ lck := c.getBranchLockInfo(c.branchFromCtx(ctx))
+ // Write-lock while this operation is in progress
+ lck.mu.Lock()
+ defer lck.mu.Unlock()
+
+ // Enforce a timeout
+ unlockCtx, cancel := context.WithTimeout(ctx, c.opts.LockTimeout)
+ defer cancel()
+
+ return lr.Unlock(unlockCtx)
+}
+*/
diff --git a/pkg/storage/client/transactional/distributed/git/filesystem.go b/pkg/storage/client/transactional/distributed/git/filesystem.go
new file mode 100644
index 00000000..4c8160f3
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/git/filesystem.go
@@ -0,0 +1,589 @@
+package git
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "io/fs"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/fluxcd/go-git-providers/gitprovider"
+ "github.com/go-git/go-git/v5"
+ "github.com/go-git/go-git/v5/config"
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/plumbing/object"
+ "github.com/go-logr/logr"
+ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
+ "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
+ "github.com/weaveworks/libgitops/pkg/util/structerr"
+ "go.uber.org/multierr"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+const (
+ ErrImmutableFilesystem = stringError("git clone is immutable; start a transaction to mutate")
+)
+
+type stringError string
+
+func (s stringError) Error() string { return string(s) }
+
+var (
+ _ filesystem.Filesystem = &Git{}
+ _ transactional.TransactionManager = &Git{}
+ _ distributed.Remote = &Git{}
+)
+
+func New(ctx context.Context, repoRef gitprovider.RepositoryRef, opts ...Option) (*Git, error) {
+ log := logr.FromContextOrDiscard(ctx)
+
+ o := defaultOpts().ApplyOptions(opts)
+
+ tmpDir, err := ioutil.TempDir("", "libgitops")
+ if err != nil {
+ return nil, err
+ }
+ log.V(2).Info("created temp directory to store Git clones in", "dir", tmpDir)
+ tmpDirTyped := rootDir(tmpDir)
+
+ transportType := gitprovider.TransportTypeHTTPS // default
+ if o.AuthMethod != nil {
+ // TODO: parse the URL instead
+ transportType = o.AuthMethod.TransportType()
+ }
+ cloneURL := repoRef.GetCloneURL(transportType)
+
+ cloneOpts := &git.CloneOptions{
+ URL: cloneURL,
+ Auth: o.AuthMethod,
+ SingleBranch: true,
+ NoCheckout: true,
+ //Depth: 1, // ref: https://github.com/go-git/go-git/issues/207
+ RecurseSubmodules: 0,
+ Progress: nil,
+ Tags: git.NoTags,
+ }
+ if o.MainBranch != "" {
+ cloneOpts.ReferenceName = plumbing.NewBranchReferenceName(o.MainBranch)
+ }
+
+ log.Info("cloning the repository", "repo-ref", repoRef)
+ // Do a base clone to the temporary directory
+ bareDir := filepath.Join(tmpDir, "root.git")
+ repo, err := git.PlainCloneContext(ctx, bareDir, true, cloneOpts)
+ // Handle errors
+ if errors.Is(err, context.DeadlineExceeded) {
+ return nil, fmt.Errorf("git clone operation timed out: %w", err)
+ } else if errors.Is(err, context.Canceled) {
+ return nil, fmt.Errorf("git clone was cancelled: %w", err)
+ } else if err != nil {
+ return nil, fmt.Errorf("git clone error: %v", err)
+ }
+
+ // Enable the uploadpack.allowReachableSHA1InWant option
+ // http://git-scm.com/docs/git-config#Documentation/git-config.txt-uploadpackallowReachableSHA1InWant
+ c, err := repo.Config()
+ if err != nil {
+ return nil, err
+ }
+ gitCfgBytes, _ := c.Marshal()
+ log.V(2).Info("git config before", "git-config", string(gitCfgBytes))
+
+ c.Raw.Section("uploadpack").SetOption("allowReachableSHA1InWant", "true")
+
+ gitCfgBytes, _ = c.Marshal()
+ log.V(2).Info("git config after", "git-config", string(gitCfgBytes))
+
+ if err := repo.SetConfig(c); err != nil {
+ return nil, err
+ }
+
+	// By default, HEAD should be a symbolic reference to the main branch
+ // TODO: Does this exist for a bare repository?
+ r, err := repo.Head()
+ if err != nil {
+ return nil, err
+ }
+ mainBranch := string(r.Target())
+ log.V(2).Info("got main branch", "main-branch", mainBranch)
+
+	return &Git{
+		Filesystem: filesystem.FromContext(&fileSystem{
+			bareRepo:      repo,
+			rootDir:       tmpDirTyped,
+			defaultBranch: mainBranch,
+		}),
+		rootDir:       tmpDirTyped,
+		bareDir:       bareDir,
+		bareRepo:      repo,
+		defaultBranch: mainBranch,
+		// localClones and its mutex must be initialized here; otherwise
+		// localCloneByUUID would panic on a nil mutex and write to a nil map.
+		localClones:   make(map[types.UID]*localClone),
+		localClonesMu: &sync.Mutex{},
+	}, nil
+}
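+
+// Construction sketch (repoRef is any gitprovider.RepositoryRef; the option
+// constructors shown are hypothetical names matching this package's Options
+// fields):
+//
+//	g, err := New(ctx, repoRef, WithMainBranch("main"), WithAuthMethod(auth))
+//	if err != nil {
+//		return err
+//	}
+//	// g implements filesystem.Filesystem, transactional.TransactionManager
+//	// and distributed.Remote, as asserted above.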
+
+type rootDir string
+
+func (d rootDir) gitDirFor(target commit.MutableTarget) string {
+	return filepath.Join(string(d), string(target.UUID())) // TODO: is appending ".git" needed?
+}
+
+// TODO: Add a FilesystemFor(dir string) Filesystem method
+type Git struct {
+ filesystem.Filesystem
+ rootDir
+ bareDir string
+ bareRepo *git.Repository
+ defaultBranch string
+
+ localClones map[types.UID]*localClone
+ localClonesMu *sync.Mutex
+}
+
+type localClone struct {
+ repo *git.Repository
+ wt *git.Worktree
+ origin *git.Remote
+ target commit.MutableTarget
+}
+
+func (g *Git) localCloneByUUID(uuid types.UID) (*localClone, bool) {
+	// g.localClonesMu guards reads and writes of the g.localClones map
+ g.localClonesMu.Lock()
+ defer g.localClonesMu.Unlock()
+
+	// Check if a local clone for this transaction UUID already exists.
+ lc, ok := g.localClones[uuid]
+ if ok {
+ return lc, true
+ }
+ // if not, grow the localClones map by one and return it
+ g.localClones[uuid] = &localClone{}
+ return g.localClones[uuid], false
+}
+
+var _ structerr.StructError = &OngoingTransactionError{}
+
+// TODO: Maybe move this to the transactional package?
+type OngoingTransactionError struct {
+ Target commit.MutableTarget
+}
+
+func (e *OngoingTransactionError) Error() string {
+	msg := "cannot start a transaction with a UUID that already exists"
+ if e.Target == nil {
+ return msg
+ }
+ return fmt.Sprintf("%s: %s (base: %s, target: %s)", msg, e.Target.UUID(), e.Target.BaseCommit(), e.Target.DestBranch())
+}
+
+func (e *OngoingTransactionError) Is(err error) bool {
+ _, ok := err.(*OngoingTransactionError)
+ return ok
+}
+
+func (g *Git) Init(ctx context.Context, tx *transactional.TxInfo) error {
+ target := tx.Target // TODO: Check for nil or not?
+
+ lc, exists := g.localCloneByUUID(target.UUID())
+ if exists {
+ return &OngoingTransactionError{Target: target}
+ }
+
+ // Do a "git init", as per the instructions at
+ // https://stackoverflow.com/questions/31278902/how-to-shallow-clone-a-specific-commit-with-depth-1
+ var err error
+ lc.repo, err = git.PlainInit(g.gitDirFor(target), false)
+ if err != nil {
+ return err
+ }
+ // Register the bare local clone as "origin"
+ lc.origin, err = lc.repo.CreateRemote(&config.RemoteConfig{
+ Name: "origin",
+ URLs: []string{g.bareDir},
+ })
+ if err != nil {
+ return err
+ }
+ // Fetch only this specific commit from the origin to HEAD, at depth 1
+ refSpec := config.RefSpec(fmt.Sprintf("%s:refs/heads/HEAD", target.BaseCommit()))
+ if err := lc.origin.FetchContext(ctx, &git.FetchOptions{
+ RefSpecs: []config.RefSpec{refSpec},
+ Depth: 1,
+ Tags: git.NoTags,
+ }); err != nil {
+ return err
+ }
+ // Now, check out the worktree
+ lc.wt, err = lc.repo.Worktree()
+ if err != nil {
+ return err
+ }
+ // Create a new branch from the fetched commit, with the head branch name
+ if err := lc.wt.Checkout(&git.CheckoutOptions{
+ Hash: *hashToGoGit(target.BaseCommit()),
+ Branch: plumbing.NewBranchReferenceName(target.DestBranch()),
+ Create: true,
+ }); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func (g *Git) Commit(ctx context.Context, tx *transactional.TxInfo, req commit.Request) error {
+ log := logr.FromContextOrDiscard(ctx)
+ target := tx.Target // TODO: Check for nil or not?
+
+ lc, exists := g.localCloneByUUID(target.UUID())
+	if !exists {
+ return stringError("nonexistent mutable target") // TODO
+ }
+
+ // TODO: Make sure this registers net-new files, too
+ if err := lc.wt.AddGlob("."); err != nil {
+ return err
+ }
+
+ t := req.Author().When()
+ if t == nil {
+ now := time.Now()
+ t = &now
+ }
+ // TODO: This should be idempotent if the TransactionClient runs it over and over again
+ newCommit, err := lc.wt.Commit(req.Message().String(), &git.CommitOptions{
+ Author: &object.Signature{
+ Name: req.Author().Name(),
+ Email: req.Author().Email(),
+ When: *t,
+ },
+ // TODO: SignKey
+ })
+ if err != nil {
+ return err
+ }
+ log.V(2).Info("created commit with hash", "commit", newCommit.String())
+
+ refSpec := fmt.Sprintf("refs/heads/%s:refs/heads/%s", target.DestBranch(), target.DestBranch())
+ if err := lc.origin.PushContext(ctx, &git.PushOptions{
+ RefSpecs: []config.RefSpec{config.RefSpec(refSpec)},
+ }); err != nil {
+ return err // TODO: Error handling for context cancellations etc.
+ }
+ log.V(2).Info("pushed refspec", "refspec", refSpec)
+
+ return nil
+}
+
+func (g *Git) Abort(ctx context.Context, tx *transactional.TxInfo) error {
+ log := logr.FromContextOrDiscard(ctx)
+ target := tx.Target // TODO: Check for nil or not?
+
+ _, exists := g.localCloneByUUID(target.UUID())
+ if !exists {
+ return stringError("nonexistent mutable target") // TODO
+ }
+
+ // Removing the Git directory completely
+ dir := g.gitDirFor(target)
+ log.V(2).Info("removing local git directory clone", "dir", dir)
+ if err := os.RemoveAll(dir); err != nil {
+ return err
+ }
+ // TODO: Shall this be done regardless of the os.RemoveAll error?
+ g.localClonesMu.Lock()
+ delete(g.localClones, target.UUID())
+ g.localClonesMu.Unlock()
+ return nil
+}
+
+func (g *Git) Pull(ctx context.Context) error {
+ ref, ok := commit.GetRef(ctx)
+ if !ok {
+ return stringError("no commit.Ref given to Git.Pull")
+ }
+ var refName plumbing.ReferenceName
+ tagMode := git.NoTags
+ switch ref.Type() {
+ case commit.RefTypeTag:
+ refName = plumbing.NewTagReferenceName(ref.Target())
+ tagMode = git.TagFollowing
+ case commit.RefTypeBranch:
+ refName = plumbing.NewBranchReferenceName(ref.Target())
+ default:
+ return fmt.Errorf("Git.Pull cannot support commit.Ref.Type = %s", ref.Type())
+ }
+
+ return g.bareRepo.FetchContext(ctx, &git.FetchOptions{
+ RemoteName: "origin",
+ RefSpecs: []config.RefSpec{refNameToSpec(refName)},
+ Tags: tagMode,
+ // TODO: Do something with Depth here?
+ })
+}
+
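+// refNameToSpec maps a reference name to a fetch/push refspec with the same
+// name on both sides, e.g. "refs/heads/main:refs/heads/main".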
+func refNameToSpec(refName plumbing.ReferenceName) config.RefSpec {
+ return config.RefSpec(fmt.Sprintf("%s:%s", refName, refName))
+}
+
+func (g *Git) Push(ctx context.Context) error {
+ target, ok := commit.GetMutableTarget(ctx)
+ if !ok {
+ return stringError("no commit.MutableTarget given to Git.Push")
+ }
+ destRefName := plumbing.NewBranchReferenceName(target.DestBranch())
+ return g.bareRepo.PushContext(ctx, &git.PushOptions{
+ RemoteName: "origin",
+ RefSpecs: []config.RefSpec{refNameToSpec(destRefName)},
+ })
+}
+
+var _ filesystem.ContextFS = &fileSystem{}
+
+type fileSystem struct {
+ bareRepo *git.Repository
+ rootDir
+ defaultBranch string
+}
+
+func (f *fileSystem) ResolveRef(sr commit.Ref) (commit.Hash, error) {
+ var h plumbing.Hash
+
+ switch sr.Type() {
+ case commit.RefTypeHash:
+ c, err := f.bareRepo.CommitObject(plumbing.NewHash(sr.Target()))
+ if err != nil {
+ return nil, err
+ }
+ h = c.Hash
+ case commit.RefTypeTag:
+ t, err := f.bareRepo.Tag(sr.Target())
+ if err != nil {
+ return nil, err
+ }
+ h = t.Hash()
+ default:
+ ref := sr.Target()
+ if sr.Type() == commit.RefTypeBranch {
+ // Default the branch if left unset
+ if ref == "" {
+ // TODO: Get rid of this
+ ref = f.defaultBranch
+ }
+ if sr.Before() != 0 {
+				ref = fmt.Sprintf("%s~%d", ref, sr.Before())
+ }
+ }
+ r, err := f.bareRepo.ResolveRevision(plumbing.Revision(ref))
+ if err != nil {
+ return nil, err
+ }
+ h = *r
+ }
+ return hashFromGoGit(h, sr), nil
+}
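+
+// Resolution sketch per ref type (commit.AtTag/commit.AtHash are assumed
+// constructors analogous to commit.AtBranch):
+//
+//	f.ResolveRef(commit.AtHash("deadbeef..."))  // validated via CommitObject
+//	f.ResolveRef(commit.AtTag("v1.0.0"))        // the tag's hash
+//	f.ResolveRef(commit.AtBranch("main"))       // revision "main", or "main~N" with Before()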
+
+func (f *fileSystem) GetRef(ctx context.Context) commit.Ref {
+ ref, ok := commit.GetRef(ctx)
+ if ok {
+ return ref
+ }
+ return commit.AtBranch(f.defaultBranch)
+}
+
+func (f *fileSystem) RefResolver() commit.RefResolver { return f }
+
+func (f *fileSystem) mutableFSFor(ctx context.Context, target commit.MutableTarget) filesystem.FS {
+ return filesystem.NewOSFilesystem(f.gitDirFor(target)).WithContext(ctx)
+}
+
+func hashToGoGit(h commit.Hash) *plumbing.Hash {
+ var ph plumbing.Hash
+ copy(ph[:], h.Hash())
+ return &ph
+}
+
+func hashFromGoGit(h plumbing.Hash, src commit.Ref) commit.Hash {
+ return commit.SHA1(h, src)
+}
+
+func (f *fileSystem) hashFor(ctx context.Context) (*plumbing.Hash, error) {
+ h, ok := commit.GetHash(ctx)
+ if ok {
+ return hashToGoGit(h), nil
+ }
+ // TODO: Use f.bareRepo.HEAD() here instead?
+ return f.bareRepo.ResolveRevision(plumbing.Revision(f.defaultBranch))
+}
+
+func (f *fileSystem) MkdirAll(ctx context.Context, path string, perm os.FileMode) error {
+ if target, mutable := commit.GetMutableTarget(ctx); mutable {
+ return f.mutableFSFor(ctx, target).MkdirAll(path, perm)
+ }
+ return ErrImmutableFilesystem
+}
+
+func (f *fileSystem) Remove(ctx context.Context, name string) error {
+ if target, mutable := commit.GetMutableTarget(ctx); mutable {
+ return f.mutableFSFor(ctx, target).Remove(name)
+ }
+ return ErrImmutableFilesystem
+}
+
+func (f *fileSystem) WriteFile(ctx context.Context, filename string, data []byte, perm os.FileMode) error {
+ if target, mutable := commit.GetMutableTarget(ctx); mutable {
+ return f.mutableFSFor(ctx, target).WriteFile(filename, data, perm)
+ }
+ return ErrImmutableFilesystem
+}
+
+// READ OPS
+
+func (f *fileSystem) Open(ctx context.Context, name string) (fs.File, error) {
+ if target, mutable := commit.GetMutableTarget(ctx); mutable {
+ return f.mutableFSFor(ctx, target).Open(name)
+ }
+ h, err := f.hashFor(ctx)
+ if err != nil {
+ return nil, err
+ }
+ fi, t, err := f.stat(h, name)
+ if err != nil {
+ return nil, err
+ }
+ ff, err := t.File(name)
+ if err != nil {
+ return nil, err
+ }
+ rc, err := ff.Reader()
+ if err != nil {
+ return nil, err
+ }
+ return &fileWrapper{fi, rc}, nil
+}
+
+type fileWrapper struct {
+ fi fs.FileInfo
+ io.ReadCloser
+}
+
+func (f *fileWrapper) Stat() (fs.FileInfo, error) { return f.fi, nil }
+
+func (f *fileSystem) Stat(ctx context.Context, name string) (fs.FileInfo, error) {
+ if target, mutable := commit.GetMutableTarget(ctx); mutable {
+ return f.mutableFSFor(ctx, target).Stat(name)
+ }
+ h, err := f.hashFor(ctx)
+ if err != nil {
+ return nil, err
+ }
+ fi, _, err := f.stat(h, name)
+ return fi, err
+}
+
+func (f *fileSystem) stat(h *plumbing.Hash, name string) (fs.FileInfo, *object.Tree, error) {
+ c, err := f.bareRepo.CommitObject(*h)
+ if err != nil {
+ return nil, nil, err
+ }
+ t, err := c.Tree()
+ if err != nil {
+ return nil, nil, err
+ }
+ te, err := t.FindEntry(name)
+ if err != nil {
+ // As part of the Stat contract, return os.ErrNotExist if the file doesn't exist
+ return nil, nil, multierr.Combine(os.ErrNotExist, err)
+ }
+ fi, err := newFileInfo(te, t, c)
+ return fi, t, err
+}
+
+func newFileInfo(te *object.TreeEntry, t *object.Tree, c *object.Commit) (*fileInfoWrapper, error) {
+ sz, err := t.Size(te.Name)
+ if err != nil {
+ return nil, err
+ }
+ return &fileInfoWrapper{te, sz, c.Committer.When}, nil
+}
+
+type fileInfoWrapper struct {
+ te *object.TreeEntry
+ sz int64
+ commitTime time.Time
+}
+
+func (i *fileInfoWrapper) Name() string { return filepath.Base(i.te.Name) } // TODO: Needed?
+func (i *fileInfoWrapper) Size() int64 { return i.sz }
+func (i *fileInfoWrapper) ModTime() time.Time { return i.commitTime }
+func (i *fileInfoWrapper) IsDir() bool { return i.Mode().IsDir() }
+func (i *fileInfoWrapper) Sys() interface{} { return nil }
+func (i *fileInfoWrapper) Type() fs.FileMode { return i.Mode() }
+func (i *fileInfoWrapper) Info() (fs.FileInfo, error) { return i, nil }
+func (i *fileInfoWrapper) Mode() fs.FileMode {
+ fm, _ := i.te.Mode.ToOSFileMode()
+ return fm
+}
+
+func (f *fileSystem) ReadDir(ctx context.Context, dirname string) ([]fs.DirEntry, error) {
+ if target, mutable := commit.GetMutableTarget(ctx); mutable {
+ return f.mutableFSFor(ctx, target).ReadDir(dirname)
+ }
+ h, err := f.hashFor(ctx)
+ if err != nil {
+ return nil, err
+ }
+ c, err := f.bareRepo.CommitObject(*h)
+ if err != nil {
+ return nil, err
+ }
+ t, err := c.Tree()
+ if err != nil {
+ return nil, err
+ }
+ tw := object.NewTreeWalker(t, false, nil)
+ infos := []fs.DirEntry{}
+ for {
+ _, te, err := tw.Next()
+ if errors.Is(err, io.EOF) {
+ break
+ }
+ if err != nil {
+ return nil, err
+ }
+ fi, err := newFileInfo(&te, t, c)
+ if err != nil {
+ return nil, err
+ }
+ infos = append(infos, fi)
+ }
+ return infos, nil
+}
+
+func (f *fileSystem) ReadFile(ctx context.Context, filename string) ([]byte, error) {
+ if target, mutable := commit.GetMutableTarget(ctx); mutable {
+ return f.mutableFSFor(ctx, target).ReadFile(filename)
+ }
+ return nil, ErrImmutableFilesystem // TODO
+}
+
+func (f *fileSystem) Checksum(ctx context.Context, filename string) (string, error) {
+ if target, mutable := commit.GetMutableTarget(ctx); mutable {
+ return f.mutableFSFor(ctx, target).Checksum(filename)
+ }
+
+ h, err := f.hashFor(ctx)
+ if err != nil {
+ return "", err
+ }
+	// Do a stat such that os.ErrNotExist is returned if the file doesn't exist
+ if _, _, err := f.stat(h, filename); err != nil {
+ return "", err
+ }
+ return h.String(), nil
+}
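+
+// Design note: at an immutable commit, the commit hash doubles as the checksum
+// for every file in it; any change to a file implies a new commit hash.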
diff --git a/pkg/storage/client/transactional/distributed/git/git.go b/pkg/storage/client/transactional/distributed/git/git.go
new file mode 100644
index 00000000..c5f754eb
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/git/git.go
@@ -0,0 +1,231 @@
+package git
+
+/*
+var (
+ // ErrNotStarted happens if you try to operate on the LocalClone before you have started
+ // it with StartCheckoutLoop.
+ ErrNotStarted = errors.New("the LocalClone hasn't been started (and hence, cloned) yet")
+ // ErrCannotWriteToReadOnly happens if you try to do a write operation for a non-authenticated Git repo.
+ ErrCannotWriteToReadOnly = errors.New("the LocalClone is read-only, cannot write")
+ // ErrWorktreeClean happens if there are no modified files in the worktree when trying to create a commit.
+ ErrWorktreeClean = errors.New("there are no modified files, cannot create new commit")
+ // ErrWorktreeNotClean happens if there are modified files in the worktree when trying to create a new branch.
+ ErrWorktreeNotClean = errors.New("there are uncommitted changes, cannot create new branch")
+)
+
+// LocalClone is an implementation of both a Remote, and a TransactionManager, for Git.
+var _ transactional.TransactionManager = &LocalClone{}
+var _ distributed.Remote = &LocalClone{}
+
+// Create a new Remote and TransactionManager implementation using Git. The repo is cloned immediately
+// in the constructor, you can use ctx to enforce a timeout for the clone.
+func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts ...Option) (*LocalClone, error) {
+ log.Info("Initializing the Git repo...")
+
+ o := defaultOpts().ApplyOptions(opts)
+
+ // Create a temporary directory for the clone
+ tmpDir, err := ioutil.TempDir("", "libgitops")
+ if err != nil {
+ return nil, err
+ }
+ log.Debugf("Created temporary directory for the git clone at %q", tmpDir)
+
+ d := &LocalClone{
+ repoRef: repoRef,
+ opts: o,
+ cloneDir: tmpDir,
+ lock: &sync.Mutex{},
+ }
+
+ log.Trace("URL endpoint parsed and authentication method chosen")
+
+ if d.canWrite() {
+ log.Infof("Running in read-write mode, will commit back current status to the repo")
+ } else {
+ log.Infof("Running in read-only mode, won't write status back to the repo")
+ }
+
+ d.impl, err = NewGoGit(ctx, repoRef, tmpDir, o)
+ if err != nil {
+ return nil, err
+ }
+
+ return d, nil
+}
+
+// LocalClone is an implementation of both a Remote, and a TransactionManager, for Git.
+// TODO: Make so that the LocalClone does NOT interfere with any reads or writes by the Client using some shared
+// mutex.
+type LocalClone struct {
+ // user-specified options
+ repoRef gitprovider.RepositoryRef
+ opts *Options
+
+ // the temporary directory used for the clone
+ cloneDir string
+
+ // the lock for git operations (so no ops are done simultaneously)
+ lock *sync.Mutex
+
+ impl Interface
+
+ // TODO: Keep track of current worktree branch
+}
+
+func (d *LocalClone) Dir() string {
+ return d.cloneDir
+}
+
+func (d *LocalClone) MainBranch() string {
+ return d.opts.MainBranch
+}
+
+func (d *LocalClone) RepositoryRef() gitprovider.RepositoryRef {
+ return d.repoRef
+}
+
+func (d *LocalClone) canWrite() bool {
+ return d.opts.AuthMethod != nil
+}
+
+// verifyRead makes sure it's ok to start a read-something-from-git process
+func (d *LocalClone) verifyRead() error {
+ // Safeguard against not starting yet
+	if d.wt == nil {
+		return fmt.Errorf("cannot pull: %w", ErrNotStarted)
+	}
+ return nil
+}
+
+// verifyWrite makes sure it's ok to start a write-something-to-git process
+func (d *LocalClone) verifyWrite() error {
+ // We need all read privileges first
+ if err := d.verifyRead(); err != nil {
+ return err
+ }
+ // Make sure we don't write to a possibly read-only repo
+ if !d.canWrite() {
+ return ErrCannotWriteToReadOnly
+ }
+ return nil
+}
+
+func (d *LocalClone) Pull(ctx context.Context) error {
+ // Lock the mutex now that we're starting, and unlock it when exiting
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ // TODO: This should support doing Fetch() only maybe
+ // TODO: Remove the requirement to actually be on the branch
+ // that is being pulled.
+
+ // Make sure it's okay to read
+ if err := d.verifyRead(); err != nil {
+ return err
+ }
+
+ if err := d.impl.Pull(ctx); err != nil {
+ return err
+ }
+
+ ref, err := d.impl.CommitAt(ctx, "") // HEAD
+ if err != nil {
+ return err
+ }
+
+ log.Infof("New commit observed %s", ref)
+ return nil
+}
+
+func (d *LocalClone) Push(ctx context.Context) error {
+ // Perform the git push operation. The context carries a timeout
+ log.Debug("Starting push operation")
+ return d.impl.Push(ctx, "") // TODO: only push the current branch
+}
+
+func (d *LocalClone) CreateBranch(ctx context.Context, branch string) error {
+ // Lock the mutex now that we're starting, and unlock it when exiting
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ // TODO: Should the caller do a force-reset using ResetToCleanVersion before creating the branch?
+
+ // Make sure it's okay to write
+ if err := d.verifyWrite(); err != nil {
+ return err
+ }
+
+ // Sanity-check that the worktree is clean before switching branches
+ if clean, err := d.impl.IsWorktreeClean(ctx); err != nil {
+ return err
+ } else if !clean {
+ return ErrWorktreeNotClean
+ }
+
+ // Create and switch to the new branch
+ return d.impl.CheckoutBranch(ctx, branch, false, true)
+}
+
+func (d *LocalClone) ResetToCleanVersion(ctx context.Context, branch string) error {
+ // Lock the mutex now that we're starting, and unlock it when exiting
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ // Make sure it's okay to write
+ if err := d.verifyWrite(); err != nil {
+ return err
+ }
+
+ // Best-effort clean, don't check the error
+ _ = d.impl.Clean(ctx)
+ // Force-checkout the main branch
+ // TODO: If a transaction (non-branched) was able to commit, and failed after that
+ // we need to roll back that commit.
+ return d.impl.CheckoutBranch(ctx, branch, true, false)
+ // TODO: Do a pull here too?
+}
+
+// Commit creates a commit of all changes in the current worktree with the given parameters.
+// It also automatically pushes the branch after the commit.
+// ErrNotStarted is returned if the repo hasn't been cloned yet.
+// ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided.
+func (d *LocalClone) Commit(ctx context.Context, commit transactional.Commit) error {
+ // Lock the mutex now that we're starting, and unlock it when exiting
+ d.lock.Lock()
+ defer d.lock.Unlock()
+
+ // Make sure it's okay to write
+ if err := d.verifyWrite(); err != nil {
+ return err
+ }
+
+ // Don't commit anything if already clean
+ if clean, err := d.impl.IsWorktreeClean(ctx); err != nil {
+ return err
+ } else if clean {
+ return ErrWorktreeClean
+ }
+
+ // Do a commit
+ log.Debug("Committing all local changes")
+ hash, err := d.impl.Commit(ctx, commit)
+ if err != nil {
+ return fmt.Errorf("git commit error: %v", err)
+ }
+
+ // Notify upstream that we now have a new commit, and allow writing again
+ log.Infof("A new commit has been created: %q", hash)
+ return nil
+}
+
+// Cleanup cancels running goroutines and operations, and removes the temporary clone directory
+func (d *LocalClone) Cleanup() error {
+ // Remove the temporary directory
+ if err := os.RemoveAll(d.Dir()); err != nil {
+ log.Errorf("Failed to clean up temp git directory: %v", err)
+ return err
+ }
+ return nil
+}
+*/
diff --git a/pkg/storage/client/transactional/distributed/git/github/github.go b/pkg/storage/client/transactional/distributed/git/github/github.go
new file mode 100644
index 00000000..702c6213
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/git/github/github.go
@@ -0,0 +1,182 @@
+package github
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/fluxcd/go-git-providers/github"
+ "github.com/fluxcd/go-git-providers/gitprovider"
+ gogithub "github.com/google/go-github/v32/github"
+ "github.com/sirupsen/logrus"
+ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+ "github.com/weaveworks/libgitops/pkg/storage/commit/pr"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+// PullRequest implements pr.Request.
+var _ pr.Request = PullRequest{}
+
+// PullRequest implements PullRequest.
+type PullRequest struct {
+ // PullRequest is a superset of any Commit.
+ commit.Request
+
+ // TargetBranch specifies what branch the Pull Request head branch should
+ // be merged into.
+ // +required
+ TargetBranch string
+ // Labels specifies what labels should be applied on the PR.
+ // +optional
+ Labels []string
+ // Assignees specifies what user login names should be assigned to this PR.
+ // Note: Only users with "pull" access or more can be assigned.
+ // +optional
+ Assignees []string
+ // Milestone specifies what milestone this should be attached to.
+ // +optional
+ Milestone string
+}
+
+func (r PullRequest) PullRequest() pr.Metadata {
+ return &metadata{&r.Labels, &r.Assignees, &r.TargetBranch, &r.Milestone}
+}
+
+func (r PullRequest) Validate() error {
+ root := field.NewPath("github.PullRequest")
+ allErrs := field.ErrorList{}
+ if err := r.Request.Validate(); err != nil {
+ allErrs = append(allErrs, field.Invalid(root.Child("Request"), r.Request, err.Error()))
+ }
+ return allErrs.ToAggregate()
+}
+
+type metadata struct {
+ labels, assignees *[]string
+ targetBranch, milestone *string
+}
+
+func (m *metadata) TargetBranch() string { return *m.targetBranch }
+func (m *metadata) Labels() []string { return *m.labels }
+func (m *metadata) Assignees() []string { return *m.assignees }
+func (m *metadata) Milestone() string { return *m.milestone }
+
+// TODO: This package should really only depend on go-git-providers' abstraction interface
+
+var ErrProviderNotSupported = errors.New("only the Github go-git-providers provider is supported at the moment")
+
+// NewGitHubPRCommitHandler returns a new transactional.CommitHandler from a gitprovider.Client.
+func NewGitHubPRCommitHandler(c gitprovider.Client, repoRef gitprovider.RepositoryRef) (transactional.CommitHook, error) {
+ // Make sure a Github client was passed
+ if c.ProviderID() != github.ProviderID {
+ return nil, ErrProviderNotSupported
+ }
+ return &prCreator{c, repoRef}, nil
+}
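+
+// Wiring sketch (assumes a transactional.Client `txc`):
+//
+//	hook, err := NewGitHubPRCommitHandler(ghClient, repoRef)
+//	if err != nil {
+//		return err
+//	}
+//	txc.CommitHookChain().Register(hook)
+//	// A commit.Request that also implements pr.Request now opens a GitHub PR
+//	// after the transaction's commit is pushed.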
+
+type prCreator struct {
+ c gitprovider.Client
+ repoRef gitprovider.RepositoryRef
+}
+
+func (c *prCreator) PreCommitHook(ctx context.Context, info transactional.TxInfo, req commit.Request) error {
+ return nil
+}
+
+func (c *prCreator) PostCommitHook(ctx context.Context, info transactional.TxInfo, req commit.Request) error {
+ // First, validate the input
+ if err := req.Validate(); err != nil {
+ return fmt.Errorf("given commit.Request wasn't valid: %v", err)
+ }
+
+ prCommit, ok := req.(pr.Request)
+ if !ok {
+ return nil
+ }
+
+ // Use the "raw" go-github client to do this
+ ghClient := c.c.Raw().(*gogithub.Client)
+
+ // Helper variables
+ owner := c.repoRef.GetIdentity()
+ repo := c.repoRef.GetRepository()
+ var body *string
+ if prCommit.Message().Description() != "" {
+ body = gogithub.String(prCommit.Message().Description())
+ }
+
+ // Create the Pull Request
+ prPayload := &gogithub.NewPullRequest{
+ Head: gogithub.String(info.Target.DestBranch()),
+ Base: gogithub.String(prCommit.PullRequest().TargetBranch()),
+ Title: gogithub.String(prCommit.Message().Title()),
+ Body: body,
+ }
+ logrus.Infof("GitHub PR payload: %+v", prPayload)
+ pr, _, err := ghClient.PullRequests.Create(ctx, owner, repo, prPayload)
+ if err != nil {
+ return err
+ }
+
+ // If spec.GetMilestone() is set, fetch the ID of the milestone
+ // Only set milestoneID to non-nil if specified
+ var milestoneID *int
+ if len(prCommit.PullRequest().Milestone()) != 0 {
+ milestoneID, err = getMilestoneID(ctx, ghClient, owner, repo, prCommit.PullRequest().Milestone())
+ if err != nil {
+ return err
+ }
+ }
+
+ // Only set assignees to non-nil if specified
+ var assignees *[]string
+ if a := prCommit.PullRequest().Assignees(); len(a) != 0 {
+ assignees = &a
+ }
+
+ // Only set labels to non-nil if specified
+ var labels *[]string
+ if l := prCommit.PullRequest().Labels(); len(l) != 0 {
+ labels = &l
+ }
+
+ // Only PATCH the PR if any of the fields were set
+ if milestoneID != nil || assignees != nil || labels != nil {
+ _, _, err := ghClient.Issues.Edit(ctx, owner, repo, pr.GetNumber(), &gogithub.IssueRequest{
+ Milestone: milestoneID,
+ Assignees: assignees,
+ Labels: labels,
+ })
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func getMilestoneID(ctx context.Context, c *gogithub.Client, owner, repo, milestoneName string) (*int, error) {
+ // List all milestones in the repo
+ // TODO: This could/should use pagination
+ milestones, _, err := c.Issues.ListMilestones(ctx, owner, repo, &gogithub.MilestoneListOptions{
+ State: "all",
+ })
+ if err != nil {
+ return nil, err
+ }
+ // Loop through all milestones, search for one with the right name
+ for _, milestone := range milestones {
+ // Only consider a milestone with the right name
+ if milestone.GetTitle() != milestoneName {
+ continue
+ }
+ // Validate nil to avoid panics
+ if milestone.Number == nil {
+ return nil, fmt.Errorf("didn't expect milestone Number to be nil: %v", milestone)
+ }
+ // Return the Milestone number
+ return milestone.Number, nil
+ }
+ return nil, fmt.Errorf("couldn't find milestone with name: %s", milestoneName)
+}
diff --git a/pkg/storage/client/transactional/distributed/git/gogit.go b/pkg/storage/client/transactional/distributed/git/gogit.go
new file mode 100644
index 00000000..719925ae
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/git/gogit.go
@@ -0,0 +1,283 @@
+package git
+
+/*
+func NewGoGit(ctx context.Context, repoRef gitprovider.RepositoryRef, dir string, opts *Options) (*goGit, error) {
+ gg := &goGit{
+ repoRef: repoRef,
+ dir: dir,
+ lock: &sync.Mutex{},
+ opts: opts,
+ }
+ // Clone to populate repo & wt
+ if err := gg.clone(ctx); err != nil {
+ return nil, err
+ }
+ return gg, nil
+}
+
+type goGit struct {
+ repoRef gitprovider.RepositoryRef
+ dir string
+ lock *sync.Mutex
+ opts *Options
+
+ // go-git objects. wt is the worktree of the repo, persistent during the lifetime of repo.
+ repo *git.Repository
+ wt *git.Worktree
+}
+
+func (g *goGit) clone(ctx context.Context) error {
+ // Lock the mutex now that we're starting, and unlock it when exiting
+ g.lock.Lock()
+ defer g.lock.Unlock()
+
+ transportType := gitprovider.TransportTypeHTTPS // default
+ if g.opts.AuthMethod != nil {
+ // TODO: parse the URL instead
+ transportType = g.opts.AuthMethod.TransportType()
+ }
+ cloneURL := g.repoRef.GetCloneURL(transportType)
+
+ cloneOpts := &git.CloneOptions{
+ URL: cloneURL,
+ Auth: g.opts.AuthMethod,
+ SingleBranch: true,
+ NoCheckout: false,
+ //Depth: 1, // ref: https://github.com/go-git/go-git/issues/207
+ RecurseSubmodules: 0,
+ Progress: nil,
+ Tags: git.NoTags,
+ }
+ if g.opts.MainBranch != "" {
+		cloneOpts.ReferenceName = plumbing.NewBranchReferenceName(g.opts.MainBranch)
+ }
+
+ log.Infof("Starting to clone the repository %s", g.repoRef)
+ // Do a clone operation to the temporary directory
+ var err error
+ g.repo, err = git.PlainCloneContext(ctx, g.dir, true, cloneOpts)
+ // Handle errors
+ if errors.Is(err, context.DeadlineExceeded) {
+ return fmt.Errorf("git clone operation timed out: %w", err)
+ } else if errors.Is(err, context.Canceled) {
+ return fmt.Errorf("git clone was cancelled: %w", err)
+ } else if err != nil {
+ return fmt.Errorf("git clone error: %v", err)
+ }
+
+ // Populate the worktree pointer
+ g.wt, err = g.repo.Worktree()
+ if err != nil {
+ return fmt.Errorf("git get worktree error: %v", err)
+ }
+
+ // Get the latest HEAD commit and report it to the user
+ ref, err := g.repo.Head()
+ if err != nil {
+ return err
+ }
+
+ log.Infof("Repo cloned; HEAD commit is %s", ref.Hash())
+ return nil
+}
+
+func (g *goGit) Pull(ctx context.Context) error {
+ // Perform the git pull operation. The context carries a timeout
+ log.Trace("Starting pull operation")
+ err := g.wt.PullContext(ctx, &git.PullOptions{
+ Auth: g.opts.AuthMethod,
+ SingleBranch: true,
+ })
+
+ // Handle errors
+ if errors.Is(err, git.NoErrAlreadyUpToDate) {
+ // all good, nothing more to do
+ log.Trace("Pull already up-to-date")
+ return nil
+ } else if errors.Is(err, context.DeadlineExceeded) {
+ return fmt.Errorf("git pull operation timed out: %w", err)
+ } else if errors.Is(err, context.Canceled) {
+ return fmt.Errorf("git pull was cancelled: %w", err)
+ } else if err != nil {
+ return fmt.Errorf("git pull error: %v", err)
+ }
+
+ log.Trace("Pulled successfully")
+ return nil
+}
+
+func (g *goGit) Push(ctx context.Context, branchName string) error {
+ opts := &git.PushOptions{
+ Auth: g.opts.AuthMethod,
+ }
+ // Only push the branch in question, if set
+ if branchName != "" {
+ opts.RefSpecs = sameRevisionRefSpecs(branchName)
+ }
+
+ err := g.repo.PushContext(ctx, opts)
+ // Handle errors
+ if errors.Is(err, git.NoErrAlreadyUpToDate) {
+ // TODO: Is "already up-to-date" fine here, or is it a failure if there's nothing to push?
+ log.Trace("Push already up-to-date")
+ return nil
+ } else if errors.Is(err, context.DeadlineExceeded) {
+ return fmt.Errorf("git push operation timed out: %w", err)
+ } else if errors.Is(err, context.Canceled) {
+ return fmt.Errorf("git push was cancelled: %w", err)
+ } else if err != nil {
+ return fmt.Errorf("git push error: %v", err)
+ }
+
+ log.Trace("Pushed successfully")
+ return nil
+}
+
+func (g *goGit) Fetch(ctx context.Context, revision string) error {
+ // Perform the git fetch operation. The context carries a timeout
+ log.Trace("Starting fetch operation")
+ err := g.repo.FetchContext(ctx, &git.FetchOptions{
+ Auth: g.opts.AuthMethod,
+ // Fetch exactly this ref, and not others
+ RefSpecs: sameRevisionRefSpecs(revision),
+ })
+
+ // Handle errors
+ if errors.Is(err, git.NoErrAlreadyUpToDate) {
+ // all good, nothing more to do
+ log.Trace("Fetch already up-to-date")
+ return nil
+ } else if errors.Is(err, context.DeadlineExceeded) {
+ return fmt.Errorf("git fetch operation timed out: %w", err)
+ } else if errors.Is(err, context.Canceled) {
+ return fmt.Errorf("git fetch was cancelled: %w", err)
+ } else if err != nil {
+ return fmt.Errorf("git fetch error: %v", err)
+ }
+
+ log.Trace("Fetched successfully")
+ return nil
+}
+
+func (g *goGit) CheckoutBranch(_ context.Context, branch string, force, create bool) error {
+ return g.wt.Checkout(&git.CheckoutOptions{
+ Branch: plumbing.NewBranchReferenceName(branch),
+ Force: force,
+ Create: create,
+ })
+}
+
+func (g *goGit) Clean(_ context.Context) error {
+ // This is essentially a "git clean -f -d ."
+ return g.wt.Clean(&git.CleanOptions{
+ Dir: true,
+ })
+}
+
+func (g *goGit) FilesChanged(ctx context.Context, fromCommit, toCommit string) (sets.String, error) {
+ from, err := g.repo.CommitObject(plumbing.NewHash(fromCommit))
+ if err != nil {
+ return nil, err
+ }
+ ci, err := g.repo.Log(&git.LogOptions{
+ From: plumbing.NewHash(toCommit),
+ Order: git.LogOrderCommitterTime,
+ Since: &from.Author.When,
+ })
+ if err != nil {
+ return nil, err
+ }
+ files := sets.NewString()
+ err = ci.ForEach(func(c *object.Commit) error {
+ filesChanged, err := c.StatsContext(ctx)
+ if err != nil {
+ return err
+ }
+ for _, fileChanged := range filesChanged {
+ files.Insert(fileChanged.Name)
+ }
+ return nil
+ })
+ return files, err
+}
+
+func (g *goGit) Commit(_ context.Context, commit transactional.Commit) (string, error) {
+ hash, err := g.wt.Commit(commit.GetMessage().String(), &git.CommitOptions{
+ All: true,
+ Author: &object.Signature{
+ Name: commit.GetAuthor().GetName(),
+ Email: commit.GetAuthor().GetEmail(),
+ When: time.Now(),
+ },
+ })
+ return hash.String(), err
+}
+
+func (g *goGit) IsWorktreeClean(_ context.Context) (bool, error) {
+ s, err := g.wt.Status()
+ if err != nil {
+ return false, fmt.Errorf("git status failed: %v", err)
+ }
+ return s.IsClean(), nil
+}
+
+func (g *goGit) fileAtCommit(_ context.Context, commit, file string) (*object.File, *object.Commit, error) {
+ c, err := g.repo.CommitObject(plumbing.NewHash(commit))
+ if err != nil {
+ return nil, nil, err
+ }
+ f, err := c.File(file)
+ if err != nil {
+ return nil, nil, err
+ }
+ return f, c, nil
+}
+
+func (g *goGit) Stat(ctx context.Context, commit, file string) (fs.FileInfo, error) {
+ f, c, err := g.fileAtCommit(ctx, commit, file)
+ if err != nil {
+ return nil, err
+ }
+ return &fileInfoWrapper{f, c.Committer.When}, nil
+}
+
+func (g *goGit) ReadFileAtCommit(ctx context.Context, commit string, file string) ([]byte, error) {
+ // Reuse the fileAtCommit helper above instead of duplicating its logic
+ f, _, err := g.fileAtCommit(ctx, commit, file)
+ if err != nil {
+ return nil, err
+ }
+ content, err := f.Contents()
+ if err != nil {
+ return nil, err
+ }
+ return []byte(content), nil
+}
+
+func (g *goGit) CommitAt(_ context.Context, branch string) (rev string, err error) {
+ var reference *plumbing.Reference
+ if branch != "" { // Point at HEAD
+ reference, err = g.repo.Head()
+ } else {
+ reference, err = g.repo.Reference(plumbing.NewMutableVersionReferenceName(branch), true)
+ }
+ if err != nil {
+ return
+ }
+ return reference.Hash().String(), nil
+}
+
+// assume either the revision is a hash or a branch
+func sameRevisionRefSpecs(revision string) []config.RefSpec {
+ if plumbing.IsHash(revision) {
+ revision = fmt.Sprintf("%s:%s", revision, revision)
+ } else {
+ revision = fmt.Sprintf("refs/heads/%s:refs/heads/%s", revision, revision)
+ }
+ return []config.RefSpec{config.RefSpec(revision)}
+}
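+
+// For example (a sketch of the mapping): sameRevisionRefSpecs("main") yields
+// ["refs/heads/main:refs/heads/main"], while a full 40-character hex hash h
+// yields ["h:h"], so fetches and pushes target exactly that revision.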
+*/
diff --git a/pkg/storage/client/transactional/distributed/git/gogit_test.go b/pkg/storage/client/transactional/distributed/git/gogit_test.go
new file mode 100644
index 00000000..dbac532a
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/git/gogit_test.go
@@ -0,0 +1,126 @@
+package git
+
+import (
+ "errors"
+ "io/fs"
+ "os"
+ "testing"
+)
+
+func TestStat(t *testing.T) {
+ _, err := os.Stat("nonexist.yaml")
+ if !errors.Is(err, fs.ErrNotExist) {
+ t.Errorf("expected fs.ErrNotExist for a nonexistent file, got %v", err)
+ }
+}
+
+/*
+type filesChangedSubTest struct {
+ fromCommit string
+ toCommit string
+ want []string
+ wantErr bool
+}
+
+type readFileSubTest struct {
+ commit string
+ file string
+ wantErr bool
+}
+
+func Test_goGit(t *testing.T) {
+ tests := []struct {
+ name string
+ repoRef string
+ opts []Option
+ filesChanged []filesChangedSubTest
+ readFiles []readFileSubTest
+ }{
+ {
+ name: "default",
+ repoRef: "https://github.com/weaveworks/libgitops",
+ filesChanged: []filesChangedSubTest{
+ {
+ fromCommit: "5843c185b995e566fe245f7abb27f4c8cffcae71",
+ toCommit: "2e1789bf3be4cf03eb3b5b7d778f8cd6c39d40c7",
+ want: []string{
+ "pkg/storage/transaction/git.go",
+ "pkg/storage/transaction/pullrequest/github/github.go",
+ "pkg/util/util.go",
+ },
+ },
+ {
+ fromCommit: "5843c185b995e566fe245f7abb27f4c8cffcae71",
+ toCommit: "5843c185b995e566fe245f7abb27f4c8cffcae71",
+ want: []string{"pkg/storage/transaction/pullrequest/github/github.go"},
+ },
+ },
+ readFiles: []readFileSubTest{
+ {
+ commit: "19bdfaa92ba594b9d16312e7c923ff9ef09c65d7",
+ file: "README.md",
+ },
+ {
+ commit: "fb15f0063ff486debbf525c460797b144c5d641f",
+ file: "README.md",
+ },
+ },
+ },
+ }
+ for i, tt := range tests {
+ t.Run(fmt.Sprintf("repo_%d", i), func(t *testing.T) {
+ d, err := ioutil.TempDir("", "")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(d)
+ ctx := context.Background()
+ repoRef, err := gitprovider.ParseOrgRepositoryURL(tt.repoRef)
+ if err != nil {
+ t.Fatal(err)
+ }
+ g, err := NewGoGit(ctx, repoRef, d, defaultOpts().ApplyOptions(tt.opts))
+ if err != nil {
+ t.Fatal(err)
+ }
+ Subtest_filesChanged(t, g, tt.filesChanged)
+ Subtest_readFiles(t, g, tt.readFiles)
+ })
+ }
+}
+
+func Subtest_filesChanged(t *testing.T, g *goGit, tests []filesChangedSubTest) {
+ ctx := context.Background()
+ for i, tt := range tests {
+ t.Run(fmt.Sprintf("filesChanged_%d", i), func(t *testing.T) {
+ got, err := g.FilesChanged(ctx, tt.fromCommit, tt.toCommit)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("goGit.FilesChanged() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ if !reflect.DeepEqual(got.List(), tt.want) {
+ t.Errorf("goGit.FilesChanged() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Subtest_readFiles(t *testing.T, g *goGit, tests []readFileSubTest) {
+ ctx := context.Background()
+ for i, tt := range tests {
+ t.Run(fmt.Sprintf("readFiles_%d", i), func(t *testing.T) {
+ got, err := g.ReadFileAtCommit(ctx, tt.commit, tt.file)
+ if (err != nil) != tt.wantErr {
+ t.Errorf("goGit.ReadFileAtCommit() error = %v, wantErr %v", err, tt.wantErr)
+ return
+ }
+ validateFile := fmt.Sprintf("testdata/%s_%s", tt.commit, strings.ReplaceAll(tt.file, "/", "_"))
+ want, err := ioutil.ReadFile(validateFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("goGit.ReadFileAtCommit() = %v, want %v", got, want)
+ }
+ })
+ }
+}
+*/
diff --git a/pkg/storage/client/transactional/distributed/git/interfaces.go b/pkg/storage/client/transactional/distributed/git/interfaces.go
new file mode 100644
index 00000000..732e72dc
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/git/interfaces.go
@@ -0,0 +1,14 @@
+package git
+
+/*type Interface interface {
+ Pull(ctx context.Context) error
+ Fetch(ctx context.Context, revision string) error
+ Push(ctx context.Context, branchName string) error
+ CheckoutBranch(ctx context.Context, branchName string, force, create bool) error
+ Clean(ctx context.Context) error
+ FilesChanged(ctx context.Context, fromCommit, toCommit string) (sets.String, error)
+ Commit(ctx context.Context, commit transactional.Commit) (string, error)
+ IsWorktreeClean(ctx context.Context) (bool, error)
+ ReadFileAtCommit(ctx context.Context, commit string, file string) ([]byte, error)
+ CommitAt(ctx context.Context, branch string) (string, error)
+}*/
diff --git a/pkg/storage/client/transactional/distributed/git/options.go b/pkg/storage/client/transactional/distributed/git/options.go
new file mode 100644
index 00000000..0f4ad5be
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/git/options.go
@@ -0,0 +1,41 @@
+package git
+
+type Options struct {
+ // MainBranch is the branch to check out after cloning. The default is
+ // autodetect, i.e. the clone is made without specifying a branch.
+ MainBranch string
+
+ // AuthMethod is the authentication method to use. If unspecified, this clone is read-only.
+ AuthMethod AuthMethod
+}
+
+func defaultOpts() *Options {
+ return &Options{}
+}
+
+type Option interface {
+ ApplyTo(*Options)
+}
+
+func (o *Options) ApplyTo(target *Options) {
+ if o.MainBranch != "" {
+ target.MainBranch = o.MainBranch
+ }
+ if o.AuthMethod != nil {
+ target.AuthMethod = o.AuthMethod
+ }
+}
+
+func (o *Options) ApplyOptions(opts []Option) *Options {
+ for _, opt := range opts {
+ opt.ApplyTo(o)
+ }
+ return o
+}
+
+type Branch string
+
+func (b Branch) ApplyTo(target *Options) {
+ if b != "" {
+ target.MainBranch = string(b)
+ }
+}
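+
+// A usage sketch, assuming a constructor like the commented-out NewGoGit in
+// gogit.go: options are merged onto the defaults, with later options winning.
+//
+//   opts := defaultOpts().ApplyOptions([]Option{Branch("main")})
+//   // opts.MainBranch == "main"; opts.AuthMethod is still nil (read-only)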
diff --git a/pkg/storage/client/transactional/distributed/git/testdata/19bdfaa92ba594b9d16312e7c923ff9ef09c65d7_README.md b/pkg/storage/client/transactional/distributed/git/testdata/19bdfaa92ba594b9d16312e7c923ff9ef09c65d7_README.md
new file mode 100644
index 00000000..4ffab9f4
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/git/testdata/19bdfaa92ba594b9d16312e7c923ff9ef09c65d7_README.md
@@ -0,0 +1,18 @@
+# Weave libgitops
+
+A set of packages to help build Git-backed applications.
+Weave `libgitops` builds on top of the [Kubernetes API Machinery](https://github.com/kubernetes/apimachinery).
+
+## Getting Help
+
+If you have any questions about, feedback for or problems with `libgitops`:
+
+- Invite yourself to the [Weave Users Slack](https://slack.weave.works/).
+- Ask a question on the [#general](https://weave-community.slack.com/messages/general/) Slack channel.
+- [File an issue](https://github.com/weaveworks/libgitops/issues/new).
+
+Your feedback is always welcome!
+
+## Notes
+This project was formerly called `gitops-toolkit`, but has now been given a more descriptive name.
+If you've ended up here, you might be looking for the real [GitOps Toolkit](https://github.com/fluxcd/toolkit).
\ No newline at end of file
diff --git a/pkg/storage/client/transactional/distributed/git/testdata/fb15f0063ff486debbf525c460797b144c5d641f_README.md b/pkg/storage/client/transactional/distributed/git/testdata/fb15f0063ff486debbf525c460797b144c5d641f_README.md
new file mode 100644
index 00000000..0aff5087
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/git/testdata/fb15f0063ff486debbf525c460797b144c5d641f_README.md
@@ -0,0 +1,156 @@
+# Weave libgitops
+
+A library of tools for manipulation and storage of Kubernetes-style objects with inbuilt GitOps functionality.
+Weave `libgitops` builds on top of the [Kubernetes API Machinery](https://github.com/kubernetes/apimachinery).
+
+The library consists of several components, including (but not limited to):
+
+## YAML/JSON Serializer - `pkg/serializer`
+
+The libgitops `Serializer` is a powerful extension of the Kubernetes API Machinery serialization/manifest manipulation tools.
+
+It operates on Kubernetes `runtime.Object` compliant objects (types that implement `metav1.TypeMeta`), and focuses
+on streamlining the user experience of dealing with encoding/decoding, versioning (GVKs), conversions and
+defaulting.
+
+It also supports API types built with [controller-runtime](https://pkg.go.dev/sigs.k8s.io/controller-runtime/?tab=doc).
+
+**Feature highlight:**
+
+- Preserving of Comments (even through conversions)
+- Strict Decoding
+- Multi-Frame Support (multiple documents in one file)
+- Works with all Kubernetes-like objects
+
+**Example usage:**
+
+```go
+// Create a serializer instance for Kubernetes types
+s := serializer.NewSerializer(scheme.Scheme, nil)
+
+// Read all YAML documents, frame by frame, from STDIN
+fr := serializer.NewYAMLFrameReader(os.Stdin)
+
+// Decode all YAML documents from the FrameReader to objects
+objs, err := s.Decoder().DecodeAll(fr)
+
+// Write YAML documents, frame by frame, to STDOUT
+fw := serializer.NewYAMLFrameWriter(os.Stdout)
+
+// Encode all objects as YAML documents, into the FrameWriter
+err = s.Encoder().Encode(fw, objs...)
+```
+
+See the [`pkg/serializer`](pkg/serializer) package for details.
+
+**Note:** If you need to manipulate unstructured objects (not struct-backed, not `runtime.Object` compliant), the
+[kyaml](https://pkg.go.dev/sigs.k8s.io/kustomize/kyaml@v0.6.0/yaml?tab=doc) library from kustomize may be a better fit.
+
+## The extended `runtime` - `pkg/runtime`
+
+The [`pkg/runtime`](pkg/runtime) package provides additional definitions and helpers around the upstream API Machinery
+runtime. The most notable definition is the extended `runtime.Object` (from herein `pkg/runtime.Object`):
+
+```go
+// Object is an union of the Object interfaces that are accessible for a
+// type that embeds both metav1.TypeMeta and metav1.ObjectMeta.
+type Object interface {
+ runtime.Object
+ metav1.ObjectMetaAccessor
+ metav1.Object
+}
+```
+
+Any struct that embeds both `metav1.TypeMeta` and `metav1.ObjectMeta` inline, and has the automatically-generated
+deep-copy code using the tag `// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object` will implement
+`pkg/runtime.Object`. See an example in [cmd/sample-app/apis/sample](cmd/sample-app/apis/sample).
+
+This extended `pkg/runtime.Object` is used heavily in the storage subsystem described below.
+
+## The storage system - `pkg/storage`
+
+The storage system is a collection of interfaces and reference implementations for storing Kubernetes-like objects
+(that comply to the extended `pkg/runtime.Object` described above). It can be thought of as a database abstraction layer for objects based on how the interfaces are laid out.
+
+There are three "layers" of storages:
+
+### RawStorage interface
+
+The `RawStorage` interfaces deal with _bytes_, this includes `RawStorage` and `MappedRawStorage`. It is essentially a filesystem abstraction.
+
+- `GenericRawStorage` is a generic implementation of `RawStorage`, storing all objects as files on disk using the following path pattern: `///metadata.json`.
+- `GenericMappedRawStorage` is a generic implementation of `MappedRawStorage`, keeping track of mappings between `ObjectKey`s and the real file path on disk. This might be used for e.g. a Git repository where the file structure and contents don't follow a specific format, but mappings need to be registered separately.
+
+### Storage interfaces
+
+"Generic" `Storage` interfaces deal with _objects_, this includes `Storage`, `TransactionStorage`, `WatchStorage` and `EventStorage`.
+
+- The `Storage` interface is a union of two smaller interfaces, `ReadStorage` and `WriteStorage`. It exposes CRUD operations like `Get`, `List`, `Create`, `Update`, `Delete`.
+- `TransactionStorage` extends `ReadStorage` with a `Transaction` method, which temporarily gives access to also the `WriteStorage` part when the transaction is active.
+- `EventStorage` allows the user to subscribe to object events arising from changes by other actors in the system, e.g. a new object was added, or that someone changed or deleted some other object.
+
+### Storage implementations
+
+"High-level" `Storage` implementations bind together multiple `Storage`s, this includes `GenericWatchStorage`, `GitStorage` and `ManifestStorage`.
+
+- `GenericStorage` is a generic implementation of `Storage`, using the given `RawStorage` and `Serializer` to provide object operations to the user.
+- `GenericWatchStorage` is an implementation of `EventStorage`, using inotify to watch a directory on disk. It sends update events to a registered channel. It is a superset of and extends a given `Storage`.
+- `GitStorage` takes in a `GitDirectory` a `PullRequestProvider` and a `Serializer`. It watches for new commits automatically pulled by the `GitDirectory`, and re-syncs the underlying `GenericMappedRawStorage`. It implements the `TransactionStorage` interface, and when the transaction is active, allows writing which then yields a new branch and commit, pushed to the origin. Lastly, it can, using the `PullRequestProvider` create a Pull Request for the branch. In the future, it should also implement `EventStorage`.
+- `ManifestStorage` watches a directory on disk using `GenericWatchStorage`, uses a `GenericStorage` for object operations, and a `MappedRawStorage` for files. Using it, implementing `EventStorage`, you can subscribe to file update/create/delete events in a given directory, e.g. a cloned Git repository or "manifest directory".
+
+**Example on how the storages interact:**
+
+
+
+
+
+See the [`pkg/storage`](pkg/storage) package for details.
+
+### The filtering framework - `pkg/filter`
+
+The filtering framework provides interfaces for `pkg/runtime.Object` filters and provides some basic filter
+implementations. These are used in conjunction with storages when running `Storage.Find` and `Storage.List` calls.
+
+There are two interfaces:
+
+- `ListFilter` describes a filter implementation that filters out objects from a given list, like a UNIX pipe.
+- `ObjectFilter` describes a filter implementation returning a boolean for if a single given object is a match.
+
+There is an `ObjectToListFilter` helper provided for easily creating `ListFilter`s out of simpler `ObjectFilter`s.
+
+See the [`pkg/filter`](pkg/filter) package for details.
+
+### The GitDirectory - `pkg/gitdir`
+
+The `GitDirectory` is an abstraction layer for a temporary Git clone. It pulls and checks out new changes periodically
+in the background. It allows high-level access to write operations like creating a new branch, committing, and pushing.
+
+It is currently utilizing some functionality from [go-git-providers](https://github.com/fluxcd/go-git-providers/), but
+should be refactored to utilize it more thoroughly. See
+[weaveworks/libgitops#38](https://github.com/weaveworks/libgitops/issues/38) for more details regarding the integration.
+
+See the [`pkg/gitdir`](pkg/gitdir) package for details.
+
+### Utilities - `pkg/util`
+
+This package contains utilities used by the rest of the library. The most interesting thing here is the `Patcher`
+under [`pkg/util/patch`](pkg/util/patch), which can be used to apply patches to `pkg/runtime.Object` compliant types.
+
+## Getting Help
+
+If you have any questions about, feedback for or problems with `libgitops`:
+
+- Invite yourself to the [Weave Users Slack](https://slack.weave.works/).
+- Ask a question on the [#general](https://weave-community.slack.com/messages/general/) Slack channel.
+- [File an issue](https://github.com/weaveworks/libgitops/issues/new).
+
+Your feedback is always welcome!
+
+## Maintainers
+
+- Chanwit Kaewkasi, [@chanwit](https://github.com/chanwit)
+
+## Notes
+
+This project was formerly called `gitops-toolkit`, but has now been given a more descriptive name.
+If you've ended up here, you might be looking for the real [GitOps Toolkit](https://github.com/fluxcd/toolkit).
diff --git a/pkg/gitdir/transport.go b/pkg/storage/client/transactional/distributed/git/transport.go
similarity index 91%
rename from pkg/gitdir/transport.go
rename to pkg/storage/client/transactional/distributed/git/transport.go
index df2c325e..3ce8411b 100644
--- a/pkg/gitdir/transport.go
+++ b/pkg/storage/client/transactional/distributed/git/transport.go
@@ -1,10 +1,10 @@
-package gitdir
+package git
import (
"errors"
"github.com/fluxcd/go-git-providers/gitprovider"
- "github.com/fluxcd/toolkit/pkg/ssh/knownhosts"
+ "github.com/fluxcd/pkg/ssh/knownhosts"
"github.com/go-git/go-git/v5/plumbing/transport"
"github.com/go-git/go-git/v5/plumbing/transport/http"
"github.com/go-git/go-git/v5/plumbing/transport/ssh"
@@ -17,6 +17,8 @@ type AuthMethod interface {
transport.AuthMethod
// TransportType defines what transport type should be used with this method
TransportType() gitprovider.TransportType
+ // AuthMethod also implements the option interface
+ Option
}
// NewSSHAuthMethod creates a new AuthMethod for the Git SSH protocol, using a given
@@ -71,3 +73,7 @@ type authMethod struct {
func (a *authMethod) TransportType() gitprovider.TransportType {
return a.t
}
+
+func (a *authMethod) ApplyTo(target *Options) {
+ target.AuthMethod = a
+}
diff --git a/pkg/storage/client/transactional/distributed/interfaces.go b/pkg/storage/client/transactional/distributed/interfaces.go
new file mode 100644
index 00000000..31262667
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/interfaces.go
@@ -0,0 +1,82 @@
+package distributed
+
+import (
+ "context"
+ "time"
+
+ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+)
+
+type Client interface {
+ GenericClient
+
+ AtHash(commit.Hash) Client
+ AtRef(commit.Ref) Client
+}
+
+// GenericClient is a client that can sync state with a remote in a transactional way.
+//
+// A distributed.Client is itself most likely both a CommitHook and TransactionHook; if so,
+// it should be automatically registered with the transactional.Client's *HookChain in the
+// distributed.Client's constructor.
+type GenericClient interface {
+ // The distributed Client extends the transactional Client
+ transactional.GenericClient
+
+ // StartResyncLoop starts a resync loop for the given branches for
+ // the given interval.
+ //
+ // resyncCacheInterval specifies the interval for which resyncs
+ // (remote Pulls) should be run in the background. The duration must
+ // be positive, and non-zero.
+ //
+ // resync specifies which symbolic references to sync. The default is
+ // commit.Default(), i.e. only the "default" branch.
+ //
+ // ctx should be used to cancel the loop, if needed.
+ //
+ // While it is technically possible to start many of these resync
+ // loops, it is not recommended. Start it once, for all the branches
+ // you need. The branches will be pulled synchronously in order. The
+ // resync interval is non-sliding, which means that the interval
+ // includes the time of the operations.
+ StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resync ...commit.Ref)
+
+ // Remote exposes the underlying remote used
+ Remote() Remote
+}
+
+type Remote interface {
+ // Push pushes the attached branch (of the ctx) to the remote.
+ // Push must block as long as the operation is in progress, but also
+ // respect the timeout set on ctx and return instantly after it expires.
+ //
+ // It is guaranteed that Pull() and Push() are never called racily at
+ // the same time for the same branch, BUT Pull() and Push() might be called
+ // at the same time in any order for distinct branches. If the underlying
+ // Remote transport only supports one "writer transport" to it at the same time,
+ // the Remote must coordinate pulls and pushes with a mutex internally.
+ Push(ctx context.Context) error
+
+ // Pull pulls the attached branch (of the ctx) from the remote.
+ // Pull must block as long as the operation is in progress, but also
+ // respect the timeout set on ctx and return instantly after it expires.
+ //
+ // It is guaranteed that Pull() and Push() are never called racily at
+ // the same time for the same branch, BUT Pull() and Push() might be called
+ // at the same time in any order for distinct branches. If the underlying
+ // Remote transport only supports one "writer transport" to it at the same time,
+ // the Remote must coordinate pulls and pushes with a mutex internally.
+ Pull(ctx context.Context) error
+}
+
+// LockableRemote describes a remote that supports locking a remote branch for writing.
+type LockableRemote interface {
+ Remote
+
+ // Lock locks the branch attached to the context for writing, for the given duration.
+ Lock(ctx context.Context, d time.Duration) error
+ // Unlock reverses the write lock created by Lock()
+ Unlock(ctx context.Context) error
+}
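+
+// A sketch (not part of the public API) of how an implementation could satisfy
+// the coordination requirement above when the underlying transport only
+// supports one "writer transport" at a time: a 1-slot channel serializes
+// Push and Pull while still respecting context cancellation. The wrapped
+// field is any underlying Remote implementation (an assumption here).
+type serializedRemote struct {
+ sem chan struct{} // must be created with capacity 1
+ wrapped Remote
+}
+
+func (r *serializedRemote) Push(ctx context.Context) error {
+ select {
+ case r.sem <- struct{}{}:
+ defer func() { <-r.sem }()
+ return r.wrapped.Push(ctx)
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
+
+func (r *serializedRemote) Pull(ctx context.Context) error {
+ select {
+ case r.sem <- struct{}{}:
+ defer func() { <-r.sem }()
+ return r.wrapped.Pull(ctx)
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}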
diff --git a/pkg/storage/client/transactional/distributed/options.go b/pkg/storage/client/transactional/distributed/options.go
new file mode 100644
index 00000000..4640ce9a
--- /dev/null
+++ b/pkg/storage/client/transactional/distributed/options.go
@@ -0,0 +1,97 @@
+package distributed
+
+import "time"
+
+// ClientOption is an interface for applying options to ClientOptions.
+type ClientOption interface {
+ ApplyToClient(*ClientOptions)
+}
+
+// ClientOptions specify options on how the distributed client should
+// act according to the PACELC theorem.
+//
+// The following configurations correspond to the PACELC levels:
+//
+// PC/EC: CacheValidDuration == 0 && RemoteErrorStream == nil:
+// This makes every read first do a remote Pull(), and fails
+// critically if the Pull operation fails. Transactions fail
+// if Push() fails.
+//
+// PC/EL: CacheValidDuration > 0 && RemoteErrorStream == nil:
+// This makes a read do a remote Pull only if the delta between
+// the last Pull and time.Now() exceeds CacheValidDuration.
+// StartResyncLoop(resyncCacheInterval) can be used to
+// periodically Pull in the background, so that the latency
+// of reads is minimal. Transactions and reads fail if
+// Push() or Pull() fail.
+//
+// PA/EL: RemoteErrorStream != nil:
+// How often reads invoke Pull() is given by CacheValidDuration
+// and StartResyncLoop(resyncCacheInterval) as per above.
+// However, when a Pull() or Push() is invoked from a read or
+// transaction, and a network partition happens, such errors are
+// non-critical for the operation to succeed, as Availability is
+// favored and cached objects are returned.
+type ClientOptions struct {
+ // CacheValidDuration is the period of time the cache is still
+ // valid since its last resync (remote Pull). If set to 0, all
+ // reads will invoke a resync right before reading, as the cache
+ // is never considered valid. Setting this to 0 favors Consistency
+ // over Availability.
+ //
+ // CacheValidDuration == 0 and RemoteErrorStream != nil must not
+ // be set at the same time, as they contradict each other.
+ //
+ // Default: 1m
+ CacheValidDuration time.Duration
+ // RemoteErrorStream specifies a stream to which to redirect
+ // errors from the remote, instead of returning them to the caller.
+ // This is useful for allowing "offline operation", and favoring
+ // Availability over Consistency when a Partition happens (i.e.
+ // the network is unreachable). In normal operation, remote Push/Pull
+ // errors would propagate to the caller and "fail" the Transaction,
+ // however, if that is not desired, those errors can be propagated
+ // here, and the caller will succeed with the transaction.
+ // Default: nil (optional)
+ RemoteErrorStream chan error
+
+ // Default: 30s for all
+ LockTimeout time.Duration
+ PullTimeout time.Duration
+ PushTimeout time.Duration
+}
+
+func (o *ClientOptions) ApplyToClient(target *ClientOptions) {
+ if o.CacheValidDuration != 0 {
+ target.CacheValidDuration = o.CacheValidDuration
+ }
+ if o.RemoteErrorStream != nil {
+ target.RemoteErrorStream = o.RemoteErrorStream
+ }
+ if o.LockTimeout != 0 {
+ target.LockTimeout = o.LockTimeout
+ }
+ if o.PullTimeout != 0 {
+ target.PullTimeout = o.PullTimeout
+ }
+ if o.PushTimeout != 0 {
+ target.PushTimeout = o.PushTimeout
+ }
+}
+
+func (o *ClientOptions) ApplyOptions(opts []ClientOption) *ClientOptions {
+ for _, opt := range opts {
+ opt.ApplyToClient(o)
+ }
+ return o
+}
+
+func defaultOptions() *ClientOptions {
+ return &ClientOptions{
+ CacheValidDuration: 1 * time.Minute,
+ RemoteErrorStream: nil,
+ LockTimeout: 30 * time.Second,
+ PullTimeout: 30 * time.Second,
+ PushTimeout: 30 * time.Second,
+ }
+}
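+
+// A sketch of a PA/EL configuration as described above: remote errors are
+// redirected to a channel drained in the background, so reads and transactions
+// keep succeeding from the cache during a network partition. The concrete
+// durations and buffer size are illustrative assumptions, not recommendations.
+func examplePAELOptions() *ClientOptions {
+ errs := make(chan error, 16)
+ go func() {
+ for err := range errs {
+ _ = err // e.g. log the remote error and keep operating offline
+ }
+ }()
+ return defaultOptions().ApplyOptions([]ClientOption{
+ &ClientOptions{
+ CacheValidDuration: 30 * time.Second,
+ RemoteErrorStream: errs,
+ },
+ })
+}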
diff --git a/pkg/storage/client/transactional/handlers.go b/pkg/storage/client/transactional/handlers.go
new file mode 100644
index 00000000..2890eb34
--- /dev/null
+++ b/pkg/storage/client/transactional/handlers.go
@@ -0,0 +1,123 @@
+package transactional
+
+import (
+ "context"
+
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+)
+
+type TxInfo struct {
+ Target commit.MutableTarget
+ Options TxOptions
+}
+
+type CommitHookChain interface {
+ // The chain also itself implements CommitHook
+ CommitHook
+ // Register registers a new CommitHook to the chain
+ Register(CommitHook)
+}
+
+// CommitHook executes directly before and after a commit is being made.
+// If the transaction fails before a commit could happen, these will never
+// be run.
+type CommitHook interface {
+ // PreCommitHook executes arbitrary logic for the given transaction info
+ // and commit info. If an error is returned, the commit won't happen.
+ PreCommitHook(ctx context.Context, info TxInfo, req commit.Request) error
+ // PostCommitHook executes arbitrary logic for the given transaction info
+ // and commit info. If an error is returned, the commit will still happen
+ // on the head branch in the case of a BranchTx, but the transaction itself
+ // will fail. In the case of a "normal" transaction, the commit will be made,
+ // but later rolled back.
+ PostCommitHook(ctx context.Context, info TxInfo, req commit.Request) error
+}
+
+var _ CommitHookChain = &MultiCommitHook{}
+var _ CommitHook = &MultiCommitHook{}
+
+type MultiCommitHook struct {
+ CommitHooks []CommitHook
+}
+
+func (m *MultiCommitHook) Register(h CommitHook) {
+ m.CommitHooks = append(m.CommitHooks, h)
+}
+
+func (m *MultiCommitHook) PreCommitHook(ctx context.Context, info TxInfo, req commit.Request) error {
+ for _, ch := range m.CommitHooks {
+ if ch == nil {
+ continue
+ }
+ if err := ch.PreCommitHook(ctx, info, req); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *MultiCommitHook) PostCommitHook(ctx context.Context, info TxInfo, req commit.Request) error {
+ for _, ch := range m.CommitHooks {
+ if ch == nil {
+ continue
+ }
+ if err := ch.PostCommitHook(ctx, info, req); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type TransactionHookChain interface {
+ // The chain also itself implements TransactionHook
+ TransactionHook
+ // Register registers a new TransactionHook to the chain
+ Register(TransactionHook)
+}
+
+// TransactionHook provides a way to extend transaction behavior. Regardless
+// of the result of the transaction; these will always be run.
+type TransactionHook interface {
+ // PreTransactionHook executes before CreateBranch has been called for the
+ // TransactionManager in BranchTx mode, and in any case before any user-tx-specific
+ // code starts executing.
+ PreTransactionHook(ctx context.Context, info TxInfo) error
+ // PostTransactionHook executes when a transaction is terminated, either due
+ // to an Abort() or a successful Commit() or CreateTx().
+ PostTransactionHook(ctx context.Context, info TxInfo) error
+}
+
+var _ TransactionHookChain = &MultiTransactionHook{}
+var _ TransactionHook = &MultiTransactionHook{}
+
+type MultiTransactionHook struct {
+ TransactionHooks []TransactionHook
+}
+
+func (m *MultiTransactionHook) Register(h TransactionHook) {
+ m.TransactionHooks = append(m.TransactionHooks, h)
+}
+
+func (m *MultiTransactionHook) PreTransactionHook(ctx context.Context, info TxInfo) error {
+ for _, th := range m.TransactionHooks {
+ if th == nil {
+ continue
+ }
+ if err := th.PreTransactionHook(ctx, info); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (m *MultiTransactionHook) PostTransactionHook(ctx context.Context, info TxInfo) error {
+ for _, th := range m.TransactionHooks {
+ if th == nil {
+ continue
+ }
+ if err := th.PostTransactionHook(ctx, info); err != nil {
+ return err
+ }
+ }
+ return nil
+}
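+
+// A sketch of how an extension (e.g. a distributed.Client) would plug into a
+// chain; the auditHook type is hypothetical and not part of the library.
+// Registration would look like: client.CommitHookChain().Register(auditHook{}).
+type auditHook struct{}
+
+func (auditHook) PreCommitHook(ctx context.Context, info TxInfo, req commit.Request) error {
+ // Validate the request up front; returning an error stops the commit.
+ return req.Validate()
+}
+
+func (auditHook) PostCommitHook(ctx context.Context, info TxInfo, req commit.Request) error {
+ // E.g. notify an external system that the commit to info.Target was made.
+ return nil
+}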
diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go
new file mode 100644
index 00000000..0346352b
--- /dev/null
+++ b/pkg/storage/client/transactional/interfaces.go
@@ -0,0 +1,123 @@
+package transactional
+
+import (
+ "context"
+
+ "github.com/weaveworks/libgitops/pkg/storage/client"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+)
+
+type Client interface {
+ GenericClient
+
+ AtHash(commit.Hash) Client
+ AtRef(commit.Ref) Client
+}
+
+type GenericClient interface {
+ client.Reader
+
+ CurrentHash() (commit.Hash, error)
+ CurrentRef() commit.Ref
+
+ TransactionManager() TransactionManager
+ // KeyedLock is used for locking operations targeting branches
+ //KeyedLock() syncutil.NamedLockMap
+
+ // BranchMerger is optional.
+ //BranchMerger() BranchMerger
+
+ // CommitHookChain is a chain of hooks that are run before and after a commit is made.
+ CommitHookChain() CommitHookChain
+ // TransactionHookChain is a chain of hooks that are run before and after a transaction.
+ TransactionHookChain() TransactionHookChain
+
+ // Transaction creates a new transaction on the branch stored in the context, so that
+ // no other writes to that branch can take place meanwhile.
+ //Transaction(ctx context.Context, opts ...TxOption) Tx
+
+ // Transaction creates a new "head" branch (if branchName) with the given {branchName} name, based
+ // on the "base" branch in the context. The "base" branch is not locked for writing while
+ // the transaction is running, but the head branch is.
+ Transaction(ctx context.Context, branchName string, opts ...TxOption) Tx
+}
+
+type TransactionManager interface {
+ // Init is run at the beginning of the transaction
+ Init(ctx context.Context, tx *TxInfo) error
+
+ // Commit creates a new commit for the given branch.
+ Commit(ctx context.Context, tx *TxInfo, req commit.Request) error
+
+ Abort(ctx context.Context, tx *TxInfo) error
+
+ //RefResolver() commit.RefResolver
+ //CommitResolver() commit.Resolver
+
+ // CreateBranch creates a new branch with the given target branch name. It forks out
+ // of the branch specified in the context.
+ //CreateBranch(ctx context.Context, branch string) error
+ // ResetToCleanVersion switches back to the given branch; but first discards all non-committed
+ // changes.
+ //ResetToCleanVersion(ctx context.Context, ref core.VersionRef) error
+
+ /*// LockVersionRef takes the VersionRef attached in the context, and makes sure that it is
+ // "locked" to the current commit for a given branch.
+ LockVersionRef(ctx context.Context) (context.Context, error)*/
+}
+
+/*type BranchMerger interface {
+ MergeBranches(ctx context.Context, base, head core.VersionRef, commit Commit) error
+}*/
+
+type CustomTxFunc func(ctx context.Context, writer client.Client) error
+
+type Tx interface {
+ Commit(req commit.Request) error
+ Abort(err error) error
+
+ Client() client.Client
+
+ // TODO: Rename to Do/Run/Execute
+ Run(CustomTxFunc) Tx
+
+ Get(key core.ObjectKey, obj client.Object) Tx
+ List(list client.ObjectList, opts ...client.ListOption) Tx
+
+ Create(obj client.Object, opts ...client.CreateOption) Tx
+ Update(obj client.Object, opts ...client.UpdateOption) Tx
+ Patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) Tx
+ Delete(obj client.Object, opts ...client.DeleteOption) Tx
+ DeleteAllOf(obj client.Object, opts ...client.DeleteAllOfOption) Tx
+
+ UpdateStatus(obj client.Object, opts ...client.UpdateOption) Tx
+ PatchStatus(obj client.Object, patch client.Patch, opts ...client.PatchOption) Tx
+}
+
+/*type BranchTx interface {
+ CreateTx(Commit) BranchTxResult
+ Abort(err error) error
+
+ Client() client.Client
+
+ Custom(CustomTxFunc) BranchTx
+
+ Get(key core.ObjectKey, obj client.Object) BranchTx
+ List(list client.ObjectList, opts ...client.ListOption) BranchTx
+
+ Create(obj client.Object, opts ...client.CreateOption) BranchTx
+ Update(obj client.Object, opts ...client.UpdateOption) BranchTx
+ Patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) BranchTx
+ Delete(obj client.Object, opts ...client.DeleteOption) BranchTx
+ DeleteAllOf(obj client.Object, opts ...client.DeleteAllOfOption) BranchTx
+
+ UpdateStatus(obj client.Object, opts ...client.UpdateOption) BranchTx
+ PatchStatus(obj client.Object, patch client.Patch, opts ...client.PatchOption) BranchTx
+}
+
+type BranchTxResult interface {
+ Error() error
+ MergeWithBase(Commit) error
+}*/
diff --git a/pkg/storage/client/transactional/options.go b/pkg/storage/client/transactional/options.go
new file mode 100644
index 00000000..76109e35
--- /dev/null
+++ b/pkg/storage/client/transactional/options.go
@@ -0,0 +1,78 @@
+package transactional
+
+import "time"
+
+type TxOption interface {
+ ApplyToTx(*TxOptions)
+}
+
+var _ TxOption = &TxOptions{}
+
+func defaultTxOptions() *TxOptions {
+ return &TxOptions{
+ Timeout: 1 * time.Minute,
+ //Mode: TxModeAtomic,
+ }
+}
+
+type TxOptions struct {
+ // Timeout is the maximum time one run of the transaction can take.
+ Timeout time.Duration
+ // Retry is 0 by default, which means "no retries". If it is negative,
+ // retries (with backoff) are infinite. If the function specified is
+ // not re-entrant, leave Retry at 0.
+ Retry *int32
+
+ // The success scenario for Git is when an --ff-only push succeeds cleanly.
+ // Git always tries an --ff-only push first, then optionally tries some
+ // merge strategy, and then finally retries (the return signature should
+ // be (error, bool), where the bool specifies whether to keep retrying or not).
+ // Git-recognized strategies are: AutoMerge (which is what "git pull" does by default)
+ MergeStrategy string
+ //Mode TxMode
+}
+
+func (o *TxOptions) ApplyToTx(target *TxOptions) {
+ if o.Timeout != 0 {
+ target.Timeout = o.Timeout
+ }
+ if o.Retry != nil {
+ target.Retry = o.Retry
+ }
+ if o.MergeStrategy != "" {
+ target.MergeStrategy = o.MergeStrategy
+ }
+ /*if len(o.Mode) != 0 {
+ target.Mode = o.Mode
+ }*/
+}
+
+func (o *TxOptions) ApplyOptions(opts []TxOption) *TxOptions {
+ for _, opt := range opts {
+ opt.ApplyToTx(o)
+ }
+ return o
+}
+
+/*var _ TxOption = TxMode("")
+
+type TxMode string
+
+const (
+ // TxModeAtomic makes the transaction fully atomic, i.e. so
+ // that any read happening against the target branch during the
+ // lifetime of the transaction will be blocked until the completion
+ // of the transaction.
+ TxModeAtomic TxMode = "Atomic"
+ // TxModeAllowReading will allow reads targeting the given
+ // branch a transaction is executing against; but before the
+ // transaction has completed all reads will strictly return
+ // the data available prior to the transaction taking place.
+ TxModeAllowReading TxMode = "AllowReading"
+)
+
+func (m TxMode) ApplyToTx(target *TxOptions) {
+ target.Mode = m
+}*/
+
+var _ TxOption = TxTimeout(0)
+
+type TxTimeout time.Duration
+
+func (t TxTimeout) ApplyToTx(target *TxOptions) {
+ target.Timeout = time.Duration(t)
+}
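+
+// A usage sketch: an explicit timeout overrides the 1-minute default.
+//
+//   opts := defaultTxOptions().ApplyOptions([]TxOption{TxTimeout(30 * time.Second)})
+//   // opts.Timeout == 30 * time.Second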
diff --git a/pkg/storage/client/transactional/test_test.go b/pkg/storage/client/transactional/test_test.go
new file mode 100644
index 00000000..7c0c85e5
--- /dev/null
+++ b/pkg/storage/client/transactional/test_test.go
@@ -0,0 +1,51 @@
+package transactional_test
+
+import (
+ "context"
+ "strings"
+ "testing"
+
+ "github.com/weaveworks/libgitops/pkg/storage/client"
+ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
+ "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed/git/github"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+ "sigs.k8s.io/cluster-api/api/v1alpha3"
+)
+
+func TestFoo(t *testing.T) {
+ gitClient, _ := transactional.NewGeneric(nil, nil)
+ ctx := context.Background()
+
+ var machineList v1alpha3.MachineList
+ _ = gitClient.
+ AtRef(commit.AtBranch("main")). // Start tx from main branch
+ Transaction(ctx, "foo-update-"). // Autogenerated suffix
+ List(&machineList). // Load all Machine objects into machineList
+ Run(func(ctx context.Context, txClient client.Client) error {
+ for i := range machineList.Items {
+ machine := &machineList.Items[i]
+ // Skip all machines whose names don't start with foo
+ if !strings.HasPrefix(machine.Name, "foo") {
+ continue
+ }
+ machine.ClusterName = "weave-gitops-cluster"
+ // Update the Machine object in Git
+ if err := txClient.Update(ctx, machine); err != nil {
+ return err
+ }
+ }
+ return nil
+ }). // Do a commit, and open a PR using go-git-providers, too.
+ Commit(github.PullRequest{
+ Request: commit.GenericRequest{
+ Name: "Lucas Käldström",
+ Email: "lucas@weave.works",
+ Title: "Update CAPI machines",
+ Description: "Machines with prefix foo are now in the Weave cluster",
+ },
+ TargetBranch: "main",
+ Labels: []string{"kind/automatic"},
+ Assignees: []string{"luxas"},
+ Milestone: "v1.0.1",
+ })
+}
diff --git a/pkg/storage/client/transactional/tx.go b/pkg/storage/client/transactional/tx.go
new file mode 100644
index 00000000..b2380f43
--- /dev/null
+++ b/pkg/storage/client/transactional/tx.go
@@ -0,0 +1,26 @@
+package transactional
+
+import "github.com/weaveworks/libgitops/pkg/storage/commit"
+
+type txImpl struct {
+ *txCommon
+}
+
+func (tx *txImpl) Commit(c commit.Request) error {
+ // Run the operations, and try to create the commit
+ if err := tx.tryApplyAndCommitOperations(c); err != nil {
+ // If we failed with the transaction, abort directly
+ return tx.Abort(err)
+ }
+
+ // We successfully completed all the tasks needed
+ // Now, cleanup and unlock the branch
+ return tx.cleanupFunc()
+}
+
+// Run implements Tx.Run by queueing the given CustomTxFunc as one of the
+// transaction's operations (cf. the naming TODO on the Tx interface).
+func (tx *txImpl) Run(op CustomTxFunc) Tx {
+ tx.ops = append(tx.ops, func() error {
+ return op(tx.ctx, tx.c)
+ })
+ return tx
+}
diff --git a/pkg/storage/client/transactional/tx_branch.go b/pkg/storage/client/transactional/tx_branch.go
new file mode 100644
index 00000000..851bfd0b
--- /dev/null
+++ b/pkg/storage/client/transactional/tx_branch.go
@@ -0,0 +1,68 @@
+package transactional
+
+/*
+type txBranchImpl struct {
+ *txCommon
+
+ merger BranchMerger
+}
+
+func (tx *txBranchImpl) CreateTx(c Commit) BranchTxResult {
+ // Run the operations, and try to create the commit
+ if err := tx.tryApplyAndCommitOperations(c); err != nil {
+ // If we failed with the transaction, abort directly, and
+ // return the error wrapped in a BranchTxResult
+ abortErr := tx.Abort(err)
+ return newErrTxResult(abortErr)
+ }
+
+ // We successfully completed all the tasks needed
+ // Now, cleanup and unlock the branch
+ cleanupErr := tx.cleanupFunc()
+
+ // Allow the merger to merge, if supported
+ return &txResultImpl{
+ err: cleanupErr,
+ ctx: tx.ctx,
+ //merger: tx.merger,
+ baseBranch: tx.info.Base,
+ headBranch: tx.info.Head,
+ }
+}
+
+func (tx *txBranchImpl) Custom(op CustomTxFunc) BranchTx {
+ tx.ops = append(tx.ops, func() error {
+ return op(tx.ctx)
+ })
+ return tx
+}
+
+func newErrTxResult(err error) *txResultImpl {
+ return &txResultImpl{err: err}
+}
+
+type txResultImpl struct {
+ err error
+ ctx context.Context
+ //merger BranchMerger
+ baseBranch core.VersionRef
+ headBranch core.VersionRef
+}
+
+func (r *txResultImpl) Error() error {
+ return r.err
+}
+
+func (r *txResultImpl) MergeWithBase(c Commit) error {
+ // If there is an internal error, return it
+ if r.err != nil {
+ return r.err
+ }
+ // Make sure we have a merger
+ if r.merger == nil {
+ return fmt.Errorf("TxResult: The BranchMerger is nil")
+ }
+ // Try to merge the branch
+ return r.merger.MergeBranches(r.ctx, r.baseBranch, r.headBranch, c)
+}
+*/
diff --git a/pkg/storage/client/transactional/tx_common.go b/pkg/storage/client/transactional/tx_common.go
new file mode 100644
index 00000000..0cab3044
--- /dev/null
+++ b/pkg/storage/client/transactional/tx_common.go
@@ -0,0 +1,72 @@
+package transactional
+
+import (
+ "context"
+
+ "github.com/weaveworks/libgitops/pkg/storage/client"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+ utilerrs "k8s.io/apimachinery/pkg/util/errors"
+)
+
+type txFunc func() error
+
+type txCommon struct {
+ err error
+ c client.Client
+ manager TransactionManager
+ commitHook CommitHook
+ ctx context.Context
+ ops []txFunc
+ info TxInfo
+ cleanupFunc txFunc
+}
+
+func (tx *txCommon) Client() client.Client {
+ return tx.c
+}
+
+func (tx *txCommon) Abort(err error) error {
+ // Run the cleanup function and return an aggregate of the two possible errors
+ return utilerrs.NewAggregate([]error{
+ err,
+ tx.cleanupFunc(),
+ })
+}
+
+func (tx *txCommon) handlePreCommit(c commit.Request) txFunc {
+ return func() error {
+ return tx.commitHook.PreCommitHook(tx.ctx, tx.info, c)
+ }
+}
+
+func (tx *txCommon) commit(c commit.Request) txFunc {
+ return func() error {
+ return tx.manager.Commit(tx.ctx, &tx.info, c)
+ }
+}
+
+func (tx *txCommon) handlePostCommit(c commit.Request) txFunc {
+ return func() error {
+ return tx.commitHook.PostCommitHook(tx.ctx, tx.info, c)
+ }
+}
+
+func (tx *txCommon) tryApplyAndCommitOperations(c commit.Request) error {
+ // If an error occurred already before, just return it directly
+ if tx.err != nil {
+ return tx.err
+ }
+
+ // First, all registered client operations are run
+ // Then Pre-commit, commit, and post-commit functions are run
+ // If at any stage the context is cancelled, an error is returned
+ // immediately, and no more functions in the chain are run. The
+ // same goes for errors from any of the functions, the chain is
+ // immediately interrupted on errors.
+ return execTransactionsCtx(tx.ctx, append(
+ tx.ops,
+ tx.handlePreCommit(c),
+ tx.commit(c),
+ tx.handlePostCommit(c),
+ ))
+}
diff --git a/pkg/storage/client/transactional/tx_ops.go b/pkg/storage/client/transactional/tx_ops.go
new file mode 100644
index 00000000..b87f3d33
--- /dev/null
+++ b/pkg/storage/client/transactional/tx_ops.go
@@ -0,0 +1,112 @@
+package transactional
+
+import (
+ "context"
+
+ "github.com/weaveworks/libgitops/pkg/storage/client"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+)
+
+// Implement the required "fluent/functional" methods on Tx.
+// Go doesn't have generics; hence we need to do this twice.
+
+func (tx *txImpl) Get(key core.ObjectKey, obj client.Object) Tx {
+ return tx.Run(func(ctx context.Context, _ client.Client) error {
+ return tx.c.Get(ctx, key, obj)
+ })
+}
+func (tx *txImpl) List(list client.ObjectList, opts ...client.ListOption) Tx {
+ return tx.Run(func(ctx context.Context, _ client.Client) error {
+ return tx.c.List(ctx, list, opts...)
+ })
+}
+
+func (tx *txImpl) Create(obj client.Object, opts ...client.CreateOption) Tx {
+ return tx.Run(func(ctx context.Context, _ client.Client) error {
+ return tx.c.Create(ctx, obj, opts...)
+ })
+}
+func (tx *txImpl) Update(obj client.Object, opts ...client.UpdateOption) Tx {
+ return tx.Run(func(ctx context.Context, _ client.Client) error {
+ return tx.c.Update(ctx, obj, opts...)
+ })
+}
+func (tx *txImpl) Patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) Tx {
+ return tx.Run(func(ctx context.Context, _ client.Client) error {
+ return tx.c.Patch(ctx, obj, patch, opts...)
+ })
+}
+func (tx *txImpl) Delete(obj client.Object, opts ...client.DeleteOption) Tx {
+ return tx.Run(func(ctx context.Context, _ client.Client) error {
+ return tx.c.Delete(ctx, obj, opts...)
+ })
+}
+func (tx *txImpl) DeleteAllOf(obj client.Object, opts ...client.DeleteAllOfOption) Tx {
+ return tx.Run(func(ctx context.Context, _ client.Client) error {
+ return tx.c.DeleteAllOf(ctx, obj, opts...)
+ })
+}
+
+func (tx *txImpl) UpdateStatus(obj client.Object, opts ...client.UpdateOption) Tx {
+ return tx.Run(func(ctx context.Context, _ client.Client) error {
+ return nil // TODO tx.c.Status().Update(ctx, obj, opts...)
+ })
+}
+func (tx *txImpl) PatchStatus(obj client.Object, patch client.Patch, opts ...client.PatchOption) Tx {
+ return tx.Run(func(ctx context.Context, _ client.Client) error {
+ return nil // TODO tx.c.Status().Patch(ctx, obj, patch, opts...)
+ })
+}
+
+/*
+// Implement the required "fluent/functional" methods on BranchTx.
+// Go doesn't have generics; hence we need to do this twice.
+
+func (tx *txBranchImpl) Get(key core.ObjectKey, obj client.Object) BranchTx {
+ return tx.Custom(func(ctx context.Context) error {
+ return tx.c.Get(ctx, key, obj)
+ })
+}
+func (tx *txBranchImpl) List(list client.ObjectList, opts ...client.ListOption) BranchTx {
+ return tx.Custom(func(ctx context.Context) error {
+ return tx.c.List(ctx, list, opts...)
+ })
+}
+
+func (tx *txBranchImpl) Create(obj client.Object, opts ...client.CreateOption) BranchTx {
+ return tx.Custom(func(ctx context.Context) error {
+ return tx.c.Create(ctx, obj, opts...)
+ })
+}
+func (tx *txBranchImpl) Update(obj client.Object, opts ...client.UpdateOption) BranchTx {
+ return tx.Custom(func(ctx context.Context) error {
+ return tx.c.Update(ctx, obj, opts...)
+ })
+}
+func (tx *txBranchImpl) Patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) BranchTx {
+ return tx.Custom(func(ctx context.Context) error {
+ return tx.c.Patch(ctx, obj, patch, opts...)
+ })
+}
+func (tx *txBranchImpl) Delete(obj client.Object, opts ...client.DeleteOption) BranchTx {
+ return tx.Custom(func(ctx context.Context) error {
+ return tx.c.Delete(ctx, obj, opts...)
+ })
+}
+func (tx *txBranchImpl) DeleteAllOf(obj client.Object, opts ...client.DeleteAllOfOption) BranchTx {
+ return tx.Custom(func(ctx context.Context) error {
+ return tx.c.DeleteAllOf(ctx, obj, opts...)
+ })
+}
+
+func (tx *txBranchImpl) UpdateStatus(obj client.Object, opts ...client.UpdateOption) BranchTx {
+ return tx.Custom(func(ctx context.Context) error {
+ return nil // TODO tx.c.Status().Update(ctx, obj, opts...)
+ })
+}
+func (tx *txBranchImpl) PatchStatus(obj client.Object, patch client.Patch, opts ...client.PatchOption) BranchTx {
+ return tx.Custom(func(ctx context.Context) error {
+ return nil // TODO tx.c.Status().Patch(ctx, obj, patch, opts...)
+ })
+}
+*/
diff --git a/pkg/storage/client/transactional/utils.go b/pkg/storage/client/transactional/utils.go
new file mode 100644
index 00000000..76a9c41d
--- /dev/null
+++ b/pkg/storage/client/transactional/utils.go
@@ -0,0 +1,41 @@
+package transactional
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/hex"
+)
+
+// execTransactionsCtx executes the functions in order. Before each
+// function in the chain is run; the context is checked for errors
+// (e.g. if it has been cancelled or timed out). If a context error
+// is returned, or if a function in the chain returns an error, this
+// function returns directly, without executing the rest of the
+// functions in the chain.
+func execTransactionsCtx(ctx context.Context, funcs []txFunc) error {
+ for _, fn := range funcs {
+ if err := ctx.Err(); err != nil {
+ return err
+ }
+ if err := fn(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
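+
+// For example (a sketch; stepOne and stepTwo are hypothetical txFuncs):
+//
+//   err := execTransactionsCtx(ctx, []txFunc{stepOne, stepTwo})
+//
+// stepTwo never runs if stepOne fails or ctx is cancelled in between.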
+
+// randomSHA returns a hex-encoded string of byteLen random bytes.
+func randomSHA(byteLen int) (string, error) {
+ b := make([]byte, byteLen)
+ _, err := rand.Read(b)
+ if err != nil {
+ return "", err
+ }
+ return hex.EncodeToString(b), nil
+}
+
+/*type BranchLocker struct {
+ KeyedLock sync.KeyedLock
+}
+
+func (l *BranchLocker) */
diff --git a/pkg/storage/client/utils.go b/pkg/storage/client/utils.go
new file mode 100644
index 00000000..fb8c79a5
--- /dev/null
+++ b/pkg/storage/client/utils.go
@@ -0,0 +1,23 @@
+package client
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+var ErrNoMetadata = errors.New("it is required to embed ObjectMeta into the serialized API type")
+
+func NewObjectForGVK(gvk core.GroupVersionKind, scheme *runtime.Scheme) (Object, error) {
+ kobj, err := scheme.New(gvk)
+ if err != nil {
+ return nil, err
+ }
+ obj, ok := kobj.(Object)
+ if !ok {
+ return nil, fmt.Errorf("%w: %s", ErrNoMetadata, gvk)
+ }
+ return obj, nil
+}
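+
+// A usage sketch (the GVK values below are illustrative assumptions):
+//
+//   gvk := core.GroupVersionKind{Group: "sample.libgitops.weave.works", Version: "v1alpha1", Kind: "Car"}
+//   obj, err := NewObjectForGVK(gvk, scheme)
+//   // errors.Is(err, ErrNoMetadata) reports a registered type that lacks ObjectMeta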
diff --git a/pkg/storage/commit/commit.go b/pkg/storage/commit/commit.go
new file mode 100644
index 00000000..4c3120fd
--- /dev/null
+++ b/pkg/storage/commit/commit.go
@@ -0,0 +1,257 @@
+package commit
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "time"
+
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/apimachinery/pkg/util/uuid"
+)
+
+/*type Resolver interface {
+ ResolveHash(Hash) (Commit, error)
+}
+
+type Commit interface {
+ Hash() Hash
+ Author() Signature
+ Message() Message
+ Parents() []Hash
+}*/
+
+type Request interface {
+ Author() Signature
+ Message() Message
+ Validate() error
+}
+
+type Signature interface {
+ // Name describes the author's name (e.g. as per git config)
+ // +required
+ Name() string
+ // Email describes the author's email (e.g. as per git config).
+ // It is optional generally, but might be required by some specific
+ // implementations.
+ // +optional
+ Email() string
+ // When is the timestamp of the signature.
+ // +optional
+ When() *time.Time
+ // The String() method must return a (ideally both human- and machine-
+ // readable) concatenated string including the name and email (if
+ // applicable) of the author.
+ fmt.Stringer
+}
+
+type Message interface {
+ // Title describes the change concisely, so it can be used e.g. as
+ // a commit message or PR title. Certain implementations might enforce
+ // character limits on this string.
+ // +required
+ Title() string
+ // Description contains optional extra, more detailed information
+ // about the change.
+ // +optional
+ Description() string
+ // The String() method must return a (ideally both human- and machine-
+ // readable) concatenated string including the title and description
+ // (if applicable) of the message.
+ fmt.Stringer
+}
+
+// Hash represents an immutable commit hash as a set of "raw" bytes, typically
+// from some hash function (e.g. SHA-1 or SHA-256), along with a well-defined
+// string representation, e.g. hexadecimal encoding.
+type Hash interface {
+ Hash() []byte
+ // TODO: Rename to encoded and keep fmt.Stringer a debug print?
+ String() string
+
+ // RefSource returns the source of this computed Hash lock. Can be nil,
+ // in case this doesn't have a symbolic source. This can be used for consumers
+ // to understand how this immutable revision was computed.
+ // TODO: Do we need this?
+ // RefSource() Ref
+}
+
+func WithHash(ctx context.Context, h Hash) context.Context {
+ if h == nil {
+ return ctx
+ }
+ return context.WithValue(ctx, hashCtxKey, h)
+}
+
+func GetHash(ctx context.Context) (Hash, bool) {
+ h, ok := ctx.Value(hashCtxKey).(Hash)
+ return h, ok
+}
+
+type hashCtxKeyStruct struct{}
+
+var hashCtxKey = hashCtxKeyStruct{}
+
+type RefResolver interface {
+ ResolveRef(Ref) (Hash, error)
+ // GetRef extracts the Ref from the context, and if empty,
+ // defaults it to the default Ref.
+ GetRef(ctx context.Context) Ref
+}
+
+func SHA1(h [20]byte, src Ref) Hash {
+ b := make([]byte, 20)
+ copy(b, h[:])
+ return &hash{hash: b, encoded: hex.EncodeToString(b), src: src}
+}
+
+func SHA1String(h string, src Ref) (Hash, bool) {
+ b, err := hex.DecodeString(h)
+ if err != nil {
+ return nil, false
+ }
+ return &hash{hash: b, encoded: h, src: src}, true
+}
+
+func At(symbolic string) Ref {
+ return &symbolicRef{RefTypeUnknown, symbolic, 0}
+}
+
+func Default() Ref {
+ return AtBranch("") // Signifies the default branch
+}
+
+func AtBranch(b string) Ref {
+ return Before(b, 0)
+}
+
+func Before(b string, n uint8) Ref {
+ return &symbolicRef{RefTypeBranch, b, n}
+}
+
+func AtTag(t string) Ref {
+ return &symbolicRef{RefTypeTag, t, 0}
+}
+
+func AtHash(h string) Ref {
+ return &symbolicRef{RefTypeHash, h, 0}
+}
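+
+// A few illustrative ways to construct Refs (the values are examples only):
+//
+//	Default()           // the default branch
+//	AtBranch("main")    // the head of a branch
+//	Before("main", 1)   // one commit before the branch head
+//	AtTag("v1.0.0")     // a tag
+//	AtHash("85e52f2")   // an explicit commit hash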
+
+type RefType int
+
+func (t RefType) String() string {
+ switch t {
+ case RefTypeUnknown:
+ return "unknown"
+ case RefTypeHash:
+ return "hash"
+ case RefTypeBranch:
+ return "branch"
+ case RefTypeTag:
+ return "tag"
+ default:
+		return fmt.Sprintf("<unknown RefType: %d>", t)
+ }
+}
+
+const (
+ RefTypeUnknown RefType = iota
+ RefTypeHash
+	// A branch is generally a mutable reference, moving as new commits are added.
+ RefTypeBranch
+ RefTypeTag
+)
+
+type Ref interface {
+ Resolve(RefResolver) (Hash, error)
+
+ // TODO: Keep fmt.Stringer for debug printing, rename to Target() string?
+ Target() string
+ Type() RefType
+ Before() uint8
+}
+
+func WithRef(ctx context.Context, s Ref) context.Context {
+ if s == nil {
+ return ctx
+ }
+ return context.WithValue(ctx, symbolicCtxKey, s)
+}
+
+func GetRef(ctx context.Context) (Ref, bool) {
+ s, ok := ctx.Value(symbolicCtxKey).(Ref)
+ return s, ok
+}
+
+type symbolicCtxKeyStruct struct{}
+
+var symbolicCtxKey = symbolicCtxKeyStruct{}
+
+type hash struct {
+ hash []byte
+ encoded string
+ src Ref
+}
+
+func (h *hash) Hash() []byte { return h.hash }
+func (h *hash) String() string { return h.encoded }
+func (h *hash) RefSource() Ref { return h.src }
+
+type symbolicRef struct {
+ st RefType
+ ref string
+ before uint8
+}
+
+func (r *symbolicRef) Target() string { return r.ref }
+func (r *symbolicRef) Before() uint8 { return r.before }
+func (r *symbolicRef) Type() RefType { return r.st }
+func (r *symbolicRef) Resolve(res RefResolver) (Hash, error) {
+ // TODO: This is probably resolver-specific
+ if r.before != 0 && r.st != RefTypeUnknown && r.st != RefTypeBranch {
+ return nil, errors.New("setting Before() only works for branches")
+ }
+ return res.ResolveRef(r)
+}
+
+type MutableTarget interface {
+ // The branch to which the resulting commit from the transaction
+ // is added.
+ DestBranch() string
+
+ BaseCommit() Hash
+ UUID() types.UID
+
+ // TODO: Implement fmt.Stringer for debug printing
+}
+
+func NewMutableTarget(headBranch string, baseCommit Hash) MutableTarget {
+ return &mutableTarget{headBranch: headBranch, baseCommit: baseCommit, uuid: uuid.NewUUID()}
+}
+
+type mutableTarget struct {
+ headBranch string
+ baseCommit Hash
+ uuid types.UID
+}
+
+func (m *mutableTarget) DestBranch() string { return m.headBranch }
+func (m *mutableTarget) BaseCommit() Hash { return m.baseCommit }
+func (m *mutableTarget) UUID() types.UID { return m.uuid }
+
+func WithMutableTarget(ctx context.Context, m MutableTarget) context.Context {
+ if m == nil {
+ return ctx
+ }
+ return context.WithValue(ctx, mutableCtxKey, m)
+}
+
+func GetMutableTarget(ctx context.Context) (MutableTarget, bool) {
+ mt, ok := ctx.Value(mutableCtxKey).(MutableTarget)
+ return mt, ok
+}
+
+type mutableCtxKeyStruct struct{}
+
+var mutableCtxKey = mutableCtxKeyStruct{}
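+
+// A usage sketch (the branch name and base commit are illustrative):
+//
+//	mt := NewMutableTarget("my-feature", baseCommit)
+//	ctx = WithMutableTarget(ctx, mt)
+//	if mt, ok := GetMutableTarget(ctx); ok {
+//		fmt.Println("writing to", mt.DestBranch(), "on top of", mt.BaseCommit())
+//	}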
diff --git a/pkg/storage/commit/pr/pull_request.go b/pkg/storage/commit/pr/pull_request.go
new file mode 100644
index 00000000..589b6988
--- /dev/null
+++ b/pkg/storage/commit/pr/pull_request.go
@@ -0,0 +1,28 @@
+package pr
+
+import "github.com/weaveworks/libgitops/pkg/storage/commit"
+
+// Request can be returned when committing a transaction instead of a
+// commit.Request, if the intention is to create a PR in e.g. GitHub.
+type Request interface {
+ // PullRequest is a superset of commit.Request
+ commit.Request
+ PullRequest() Metadata
+}
+
+type Metadata interface {
+ // TargetBranch specifies what branch the Pull Request head branch should
+ // be merged into.
+ // +required
+ TargetBranch() string
+ // Labels specifies what labels should be applied on the PR.
+ // +optional
+ Labels() []string
+ // Assignees specifies what user login names should be assigned to this PR.
+ // Note: Only users with "pull" access or more can be assigned.
+ // +optional
+ Assignees() []string
+ // Milestone specifies what milestone this should be attached to.
+ // +optional
+ Milestone() string
+}
diff --git a/pkg/storage/commit/request.go b/pkg/storage/commit/request.go
new file mode 100644
index 00000000..573e315d
--- /dev/null
+++ b/pkg/storage/commit/request.go
@@ -0,0 +1,59 @@
+package commit
+
+import (
+ "fmt"
+ "time"
+
+ "k8s.io/apimachinery/pkg/util/validation"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+var _ Request = GenericRequest{}
+
+type GenericRequest struct {
+ Name string
+ Email string
+ When *time.Time
+ Title string
+ Description string
+}
+
+func (r GenericRequest) Author() Signature {
+ return &signature{&r.Name, &r.Email, r.When}
+}
+func (r GenericRequest) Message() Message {
+ return &message{&r.Title, &r.Description}
+}
+func (r GenericRequest) Validate() error {
+ root := field.NewPath("commit.GenericRequest")
+ allErrs := field.ErrorList{}
+ if len(r.Name) == 0 {
+ allErrs = append(allErrs, field.Required(root.Child("Name"), validation.EmptyError()))
+ }
+ // TODO: Should this be optional or not?
+ if len(r.Email) == 0 {
+ allErrs = append(allErrs, field.Required(root.Child("Email"), validation.EmptyError()))
+ }
+ if len(r.Title) == 0 {
+ allErrs = append(allErrs, field.Required(root.Child("Title"), validation.EmptyError()))
+ }
+ return allErrs.ToAggregate()
+}
+
+type signature struct {
+ name, email *string
+ when *time.Time
+}
+
+func (s *signature) Name() string { return *s.name }
+func (s *signature) Email() string { return *s.email }
+func (s *signature) When() *time.Time { return s.when }
+func (s *signature) String() string { return fmt.Sprintf("%s <%s>", s.Name(), s.Email()) }
+
+type message struct {
+ title, desc *string
+}
+
+func (m *message) Title() string { return *m.title }
+func (m *message) Description() string { return *m.desc }
+func (m *message) String() string { return fmt.Sprintf("%s\n\n%s", m.Title(), m.Description()) }
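+
+// A minimal sketch of building and validating a request (values are illustrative):
+//
+//	req := GenericRequest{
+//		Name:  "Jane Doe",
+//		Email: "jane@example.com",
+//		Title: "Update Car status",
+//	}
+//	if err := req.Validate(); err != nil {
+//		return err
+//	}
+//	fmt.Printf("%s: %s\n", req.Author(), req.Message())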
diff --git a/pkg/storage/core/errors.go b/pkg/storage/core/errors.go
new file mode 100644
index 00000000..f65895a9
--- /dev/null
+++ b/pkg/storage/core/errors.go
@@ -0,0 +1,50 @@
+package core
+
+import (
+ goerrors "errors"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/util/validation/field"
+)
+
+var (
+ // ErrNotImplemented can be returned for implementers that do not
+ // implement a specific part of an interface.
+ ErrNotImplemented = goerrors.New("not implemented")
+ // ErrInvalidParameter specifies that a given parameter
+ // (as a public struct field or function argument) was
+ // not valid according to the specification.
+ ErrInvalidParameter = goerrors.New("invalid parameter")
+)
+
+// StatusError is an error that supports also conversion
+// to a metav1.Status struct for more detailed information.
+type StatusError interface {
+ error
+ errors.APIStatus
+}
+
+func NewErrNotFound(id UnversionedObjectID) StatusError {
+ return errors.NewNotFound(schema.GroupResource{
+ Group: id.GroupKind().Group,
+ Resource: id.GroupKind().Kind,
+ }, id.ObjectKey().Name)
+}
+
+func NewErrAlreadyExists(id UnversionedObjectID) StatusError {
+ return errors.NewAlreadyExists(schema.GroupResource{
+ Group: id.GroupKind().Group,
+ Resource: id.GroupKind().Kind,
+ }, id.ObjectKey().Name)
+}
+
+func NewErrInvalid(id UnversionedObjectID, errs field.ErrorList) StatusError {
+ return errors.NewInvalid(id.GroupKind(), id.ObjectKey().Name, errs)
+}
+
+var (
+ IsErrNotFound = errors.IsNotFound
+ IsErrAlreadyExists = errors.IsAlreadyExists
+ IsErrInvalid = errors.IsInvalid
+)
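+
+// A usage sketch (id is an illustrative UnversionedObjectID):
+//
+//	err := NewErrNotFound(id)
+//	if IsErrNotFound(err) {
+//		// react to the object being missing, e.g. by creating it
+//	}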
diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go
new file mode 100644
index 00000000..108216b1
--- /dev/null
+++ b/pkg/storage/core/interfaces.go
@@ -0,0 +1,73 @@
+package core
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+)
+
+// Note: package core must not depend on any other parts of the libgitops repo;
+// essentially anything under k8s.io/apimachinery is ok.
+
+// GroupVersionKind and ObjectID aliases
+type GroupKind = schema.GroupKind
+type GroupVersion = schema.GroupVersion
+type GroupVersionKind = schema.GroupVersionKind
+type ObjectKey = types.NamespacedName
+
+// ObjectKeyFromMetav1Object returns the ObjectKey of a given metav1.Object.
+func ObjectKeyFromMetav1Object(obj metav1.Object) ObjectKey {
+ return ObjectKey{
+ Name: obj.GetName(),
+ Namespace: obj.GetNamespace(),
+ }
+}
+
+// UnversionedObjectID represents an ID for an Object whose version is not known.
+// However, the Group, Kind, Name and, optionally, Namespace are known and should
+// uniquely identify the Object at a specific moment in time.
+type UnversionedObjectID interface {
+ GroupKind() GroupKind
+ ObjectKey() ObjectKey
+
+ WithVersion(version string) ObjectID
+ String() string // Implements fmt.Stringer
+}
+
+// ObjectID is a superset of UnversionedObjectID, that also specifies an exact version.
+type ObjectID interface {
+ UnversionedObjectID
+
+	// WithoutVersion unwraps the underlying UnversionedObjectID, so it can
+	// be used for equality operations (e.g. as a map key).
+ WithoutVersion() UnversionedObjectID
+ GroupVersionKind() GroupVersionKind
+}
+
+/*// VersionRef is an interface that describes a reference to a specific version (for now; branch)
+// of Objects in a Storage or Client.
+type VersionRef interface {
+ // VersionRef returns the version reference, e.g. a branch name or a commit hash.
+ VersionRef() string
+ // IsZeroValue determines if this VersionRef is the "zero value", which means
+ // that the caller should figure out how to handle that the user did not
+ // give specific opinions of what version of the Object to get.
+ IsZeroValue() bool
+ // IsImmutable determines if the given version reference is immutable, i.e. cannot be modified.
+ IsImmutable() bool
+}
+
+type LockedVersionRef interface {
+ VersionRef
+
+ ImmutableRef() VersionRef
+}
+
+type MutableVersionRef interface {
+ MutableRefName() string
+ IsDefault() bool
+}
+
+type ImmutableVersionRef interface {
+ ImmutableHash() string
+}*/
diff --git a/pkg/storage/core/objectid.go b/pkg/storage/core/objectid.go
new file mode 100644
index 00000000..134b9127
--- /dev/null
+++ b/pkg/storage/core/objectid.go
@@ -0,0 +1,46 @@
+package core
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// NewUnversionedObjectID creates a new UnversionedObjectID from the given GroupKind and ObjectKey.
+func NewUnversionedObjectID(gk GroupKind, key ObjectKey) UnversionedObjectID {
+ return unversionedObjectID{gk, key}
+}
+
+type unversionedObjectID struct {
+ gk GroupKind
+ key ObjectKey
+}
+
+func (o unversionedObjectID) GroupKind() GroupKind { return o.gk }
+func (o unversionedObjectID) ObjectKey() ObjectKey { return o.key }
+func (o unversionedObjectID) WithVersion(version string) ObjectID { return objectID{o, version} }
+func (o unversionedObjectID) String() string {
+ if o.key.Namespace == "" {
+ return fmt.Sprintf("UnversionedObjectID: groupkind=%s name=%s", o.gk, o.key.Name)
+ }
+ return fmt.Sprintf("UnversionedObjectID: groupkind=%s name=%s ns=%s", o.gk, o.key.Name, o.key.Namespace)
+}
+
+// NewObjectID creates a new ObjectID from the given GroupVersionKind and ObjectKey.
+func NewObjectID(gvk GroupVersionKind, key ObjectKey) ObjectID {
+ return objectID{unversionedObjectID{gvk.GroupKind(), key}, gvk.Version}
+}
+
+type objectID struct {
+ unversionedObjectID
+ version string
+}
+
+func (o objectID) WithoutVersion() UnversionedObjectID { return o.unversionedObjectID }
+func (o objectID) GroupVersionKind() schema.GroupVersionKind { return o.gk.WithVersion(o.version) }
+func (o objectID) String() string {
+ if o.key.Namespace == "" {
+ return fmt.Sprintf("ObjectID: groupkind=%s version=%s name=%s", o.gk, o.version, o.key.Name)
+ }
+ return fmt.Sprintf("ObjectID: groupkind=%s version=%s name=%s ns=%s", o.gk, o.version, o.key.Name, o.key.Namespace)
+}
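+
+// A usage sketch (the GroupVersionKind and names are illustrative):
+//
+//	gvk := GroupVersionKind{Group: "sample-app.weave.works", Version: "v1alpha1", Kind: "Car"}
+//	id := NewObjectID(gvk, ObjectKey{Name: "my-car", Namespace: "default"})
+//	unversioned := id.WithoutVersion() // safe to use e.g. as a map key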
diff --git a/pkg/storage/core/set_unversioned_objectid.go b/pkg/storage/core/set_unversioned_objectid.go
new file mode 100644
index 00000000..18c553c8
--- /dev/null
+++ b/pkg/storage/core/set_unversioned_objectid.go
@@ -0,0 +1,179 @@
+package core
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// UnversionedObjectIDSet is a set of UnversionedObjectIDs.
+// The underlying data storage is a map[UnversionedObjectID]struct{}.
+//
+// This interface should be as similar as possible to
+// k8s.io/apimachinery/pkg/util/sets.
+type UnversionedObjectIDSet interface {
+ // Has returns true if the object ID is in the set.
+ Has(id UnversionedObjectID) bool
+ // HasAny returns true if any of the object IDs are in the set.
+ HasAny(ids ...UnversionedObjectID) bool
+
+ // Insert inserts the given object IDs into the set. Returns itself.
+ // WARNING: This mutates the receiver. Issue a Copy() before if not desired.
+ Insert(ids ...UnversionedObjectID) UnversionedObjectIDSet
+ // InsertSet inserts the contents of s2 into itself, and returns itself.
+ // WARNING: This mutates the receiver. Issue a Copy() before if not desired.
+ InsertSet(s2 UnversionedObjectIDSet) UnversionedObjectIDSet
+
+ // Delete deletes the given object IDs from the set. Returns itself.
+ // WARNING: This mutates the receiver. Issue a Copy() before if not desired.
+ Delete(ids ...UnversionedObjectID) UnversionedObjectIDSet
+ // DeleteSet deletes the contents of s2 from itself, and returns itself.
+ // WARNING: This mutates the receiver. Issue a Copy() before if not desired.
+ DeleteSet(s2 UnversionedObjectIDSet) UnversionedObjectIDSet
+
+ // List lists the given object IDs of the set, in no particular order.
+	// List requires O(n) extra memory, where n == Len(). Use ForEach to avoid copying.
+ List() []UnversionedObjectID
+ // ForEach runs fn for each item in the set. Does not copy the whole list.
+ // Uses a for-range underneath, so it is even safe to delete items underneath, ref:
+ // https://stackoverflow.com/questions/23229975/is-it-safe-to-remove-selected-keys-from-map-within-a-range-loop
+ // If an error occurs, the rest of the IDs are not traversed. Iteration order is random.
+ ForEach(fn func(id UnversionedObjectID) error) error
+
+ // Len returns the length of the set
+ Len() int
+	// Copy performs a shallow copy of the set elements, but a deep copy of the
+	// underlying map itself, so mutating operations do not propagate unwantedly.
+ Copy() UnversionedObjectIDSet
+
+ // Difference returns a set of objects that are not in s2
+ // For example:
+ // s1 = {a1, a2, a3}
+ // s2 = {a1, a2, a4, a5}
+ // s1.Difference(s2) = {a3}
+ // s2.Difference(s1) = {a4, a5}
+ Difference(s2 UnversionedObjectIDSet) UnversionedObjectIDSet
+
+ // String returns a human-friendly representation
+ String() string
+}
+
+// NewUnversionedObjectIDSet creates a new UnversionedObjectIDSet
+func NewUnversionedObjectIDSet(ids ...UnversionedObjectID) UnversionedObjectIDSet {
+ return NewUnversionedObjectIDSetSized(len(ids), ids...)
+}
+
+// NewUnversionedObjectIDSetSized creates a new UnversionedObjectIDSet, sized for
+// the given number of items.
+func NewUnversionedObjectIDSetSized(size int, ids ...UnversionedObjectID) UnversionedObjectIDSet {
+	return (make(unversionedObjectIDSet, size)).Insert(ids...)
+}
+
+// UnversionedObjectIDSetFromVersionedSlice transforms a slice of ObjectIDs to
+// an unversioned set.
+func UnversionedObjectIDSetFromVersionedSlice(versioned []ObjectID) UnversionedObjectIDSet {
+ result := NewUnversionedObjectIDSetSized(len(versioned))
+ for _, id := range versioned {
+ // Important: We should "unwrap" to a plain UnversionedObjectID here, so
+ // equality works properly in e.g. map keys.
+ result.Insert(id.WithoutVersion())
+ }
+ return result
+}
+
+type unversionedObjectIDSet map[UnversionedObjectID]sets.Empty
+
+func (s unversionedObjectIDSet) Has(id UnversionedObjectID) bool {
+ _, found := s[id]
+ return found
+}
+
+func (s unversionedObjectIDSet) HasAny(ids ...UnversionedObjectID) bool {
+ for _, id := range ids {
+ if s.Has(id) {
+ return true
+ }
+ }
+ return false
+}
+
+func (s unversionedObjectIDSet) Insert(ids ...UnversionedObjectID) UnversionedObjectIDSet {
+ for _, id := range ids {
+ s[id] = sets.Empty{}
+ }
+ return s
+}
+
+// InsertSet inserts the contents of s2 into itself, and returns itself.
+func (s unversionedObjectIDSet) InsertSet(s2 UnversionedObjectIDSet) UnversionedObjectIDSet {
+ _ = s2.ForEach(func(id UnversionedObjectID) error {
+ s[id] = sets.Empty{}
+ return nil
+ })
+ return s
+}
+
+func (s unversionedObjectIDSet) Delete(ids ...UnversionedObjectID) UnversionedObjectIDSet {
+ for _, id := range ids {
+ delete(s, id)
+ }
+ return s
+}
+
+// DeleteSet deletes the contents of s2 from itself, and returns itself.
+func (s unversionedObjectIDSet) DeleteSet(s2 UnversionedObjectIDSet) UnversionedObjectIDSet {
+ _ = s2.ForEach(func(id UnversionedObjectID) error {
+ delete(s, id)
+ return nil
+ })
+ return s
+}
+
+func (s unversionedObjectIDSet) List() []UnversionedObjectID {
+ list := make([]UnversionedObjectID, 0, len(s))
+ for id := range s {
+ list = append(list, id)
+ }
+ return list
+}
+
+// ForEach runs fn for each item in the set. Does not copy the whole list.
+func (s unversionedObjectIDSet) ForEach(fn func(id UnversionedObjectID) error) (err error) {
+ for key := range s {
+ if err = fn(key); err != nil {
+ return
+ }
+ }
+ return
+}
+
+func (s unversionedObjectIDSet) Len() int {
+ return len(s)
+}
+
+func (s unversionedObjectIDSet) Copy() UnversionedObjectIDSet {
+ result := make(unversionedObjectIDSet, s.Len())
+ for id := range s {
+ result.Insert(id)
+ }
+ return result
+}
+
+// Difference returns a set of objects that are not in s2
+// For example:
+// s1 = {a1, a2, a3}
+// s2 = {a1, a2, a4, a5}
+// s1.Difference(s2) = {a3}
+// s2.Difference(s1) = {a4, a5}
+func (s unversionedObjectIDSet) Difference(s2 UnversionedObjectIDSet) UnversionedObjectIDSet {
+ result := NewUnversionedObjectIDSet()
+ for key := range s {
+ if !s2.Has(key) {
+ result.Insert(key)
+ }
+ }
+ return result
+}
+
+func (s unversionedObjectIDSet) String() string {
+ return fmt.Sprintf("UnversionedObjectIDSet (len=%d): %v", s.Len(), s.List())
+}
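+
+// A usage sketch of typical set operations (a1..a5 are illustrative IDs):
+//
+//	s1 := NewUnversionedObjectIDSet(a1, a2, a3)
+//	s2 := NewUnversionedObjectIDSet(a1, a2, a4, a5)
+//	union := s1.Copy().InsertSet(s2) // {a1, a2, a3, a4, a5}; s1 unchanged
+//	diff := s1.Difference(s2)        // {a3}
+//	_ = diff.ForEach(func(id UnversionedObjectID) error {
+//		fmt.Println(id)
+//		return nil
+//	})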
diff --git a/pkg/storage/core/versionref.go b/pkg/storage/core/versionref.go
new file mode 100644
index 00000000..f5728a07
--- /dev/null
+++ b/pkg/storage/core/versionref.go
@@ -0,0 +1,94 @@
+package core
+
+/*
+type VersionRefResolver interface {
+ //IsImmutable(ref string) (bool, error)
+ // Turns a branch name into a commit hash. If ref already is an existing commit, this is a no-op.
+ ResolveVersionRef(ref string) (c Commit, immutableRef bool, err error)
+}
+
+type Commit string
+
+var versionRefKey = versionRefKeyImpl{}
+
+type versionRefKeyImpl struct{}
+
+// WithVersionRef attaches the given VersionRef to a Context (it
+// overwrites if one already exists in ctx). The key for the ref
+// is private in this package, so one must use this function to
+// register it.
+func WithVersionRef(ctx context.Context, ref string) context.Context {
+ return context.WithValue(ctx, versionRefKey, ref)
+}
+
+// GetVersionRef returns the VersionRef attached to this context.
+// If there is no attached VersionRef, or it is nil, a BranchRef
+// with branch "" will be returned as the "zero value" of VersionRef.
+func GetVersionRef(ctx context.Context) string {
+ r, ok := ctx.Value(versionRefKey).(string)
+ // Return default ref if none specified
+ if !ok {
+ return ""
+ }
+ return r
+}*/
+
+/*
+// NewMutableVersionRef creates a new VersionRef for a given branch. It is
+// valid for the branch to be ""; in this case it means the "zero
+// value", or unspecified branch to be more precise, where the caller
+// can choose how to handle.
+func NewMutableVersionRef(ref string) VersionRef {
+ return versionRef{
+ ref: ref,
+ immutable: false,
+ }
+}
+
+func WithMutableVersionRef(ctx context.Context, ref string) context.Context {
+ return WithVersionRef(ctx, NewMutableVersionRef(ref))
+}
+
+func NewImmutableVersionRef(ref string) VersionRef {
+ return versionRef{
+ ref: ref,
+ immutable: false,
+ }
+}
+
+func WithImmutableVersionRef(ctx context.Context, ref string) context.Context {
+ return WithVersionRef(ctx, NewImmutableVersionRef(ref))
+}
+
+type versionRef struct {
+ ref string
+ immutable bool
+}
+
+func (r versionRef) VersionRef() string { return r.ref }
+
+// A branch is considered the zero value if the branch is an empty string,
+// which it is e.g. when there was no VersionRef associated with a Context.
+func (r versionRef) IsZeroValue() bool { return r.ref == "" }
+
+func (r versionRef) IsImmutable() bool { return r.immutable }
+
+func NewLockedVersionRef(mutable, immutable VersionRef) LockedVersionRef {
+ if !immutable.IsImmutable() {
+ panic("NewLockedVersionRef: immutable VersionRef must be immutable")
+ }
+ return lockedVersionRef{
+ mutable: mutable,
+ immutable: immutable,
+ }
+}
+
+type lockedVersionRef struct {
+ mutable, immutable VersionRef
+}
+
+func (r lockedVersionRef) VersionRef() string { return r.mutable.VersionRef() }
+func (r lockedVersionRef) IsZeroValue() bool { return r.mutable.IsZeroValue() }
+func (r lockedVersionRef) IsImmutable() bool { return r.mutable.IsImmutable() }
+func (r lockedVersionRef) ImmutableRef() VersionRef { return r.immutable }
+*/
diff --git a/pkg/storage/event/event.go b/pkg/storage/event/event.go
new file mode 100644
index 00000000..9a6ee903
--- /dev/null
+++ b/pkg/storage/event/event.go
@@ -0,0 +1,50 @@
+package event
+
+/*
+// ObjectEventType is an enum describing a change in an Object's state.
+type ObjectEventType byte
+
+var _ fmt.Stringer = ObjectEventType(0)
+
+const (
+ ObjectEventNone ObjectEventType = iota // 0
+ ObjectEventCreate // 1
+ ObjectEventUpdate // 2
+ ObjectEventDelete // 3
+ ObjectEventSync // 4
+ ObjectEventError // 5
+)
+
+func (o ObjectEventType) String() string {
+ switch o {
+ case 0:
+ return "NONE"
+ case 1:
+ return "CREATE"
+ case 2:
+ return "UPDATE"
+ case 3:
+ return "DELETE"
+ case 4:
+ return "SYNC"
+ case 5:
+ return "ERROR"
+ }
+
+ // Should never happen
+ return "UNKNOWN"
+}
+
+// ObjectEvent describes a change that has been observed
+// for the given object with the given ID.
+type ObjectEvent struct {
+ ID core.UnversionedObjectID
+ Type ObjectEventType
+ // Error is only non-nil if Type == ObjectEventError. The receiver
+ // must check/respect the error if set.
+ Error error
+}
+
+// ObjectEventStream is a channel of ObjectEvents
+type ObjectEventStream chan *ObjectEvent
+*/
diff --git a/pkg/storage/event/interfaces.go b/pkg/storage/event/interfaces.go
new file mode 100644
index 00000000..112634fb
--- /dev/null
+++ b/pkg/storage/event/interfaces.go
@@ -0,0 +1,30 @@
+package event
+
+import (
+ "context"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/storage"
+ "k8s.io/apimachinery/pkg/watch"
+)
+
+// Storage is the abstract combination of a normal storage.Storage and
+// a possibility to listen for changes to objects as they happen.
+// TODO: Maybe we could use some of controller-runtime's built-in functionality
+// for watching for changes?
+// TODO: Use k8s.io/apimachinery/pkg/watch#EventType et al instead.
+type Storage interface {
+ storage.Storage
+
+ // WatchForObjectEvents starts feeding ObjectEvents into the given "into"
+ // channel. The caller is responsible for setting a channel buffering
+ // limit large enough to not block normal operation. An error might
+ // be returned if a maximum amount of watches has been opened already,
+	// be returned if the maximum number of watches has already been opened,
+ //WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error
+
+ Watch(ctx context.Context) (watch.Interface, error)
+
+ // Close closes the EventStorage and underlying resources gracefully.
+ io.Closer
+}
diff --git a/pkg/storage/filesystem/dir_traversal.go b/pkg/storage/filesystem/dir_traversal.go
new file mode 100644
index 00000000..b5be448b
--- /dev/null
+++ b/pkg/storage/filesystem/dir_traversal.go
@@ -0,0 +1,39 @@
+package filesystem
+
+import (
+ "context"
+ "io/fs"
+)
+
+// ListValidFilesInFilesystem discovers files in the given Filesystem that have a
+// ContentType that contentTyper recognizes, and that are not excluded by
+// pathExcluder.
+func ListValidFilesInFilesystem(ctx context.Context, givenFs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder) (files []string, err error) {
+ fsys := givenFs.WithContext(ctx)
+ err = fs.WalkDir(fsys, "", func(path string, d fs.DirEntry, err error) error {
+ if err != nil {
+ return err
+ }
+
+ // Only include valid files
+ if !d.IsDir() && IsValidFileInFilesystem(ctx, givenFs, contentTyper, pathExcluder, path) {
+ files = append(files, path)
+ }
+ return nil
+ })
+ return
+}
+
+// IsValidFileInFilesystem checks if file (a relative path) has a ContentType
+// that contentTyper recognizes, and is not a path that is excluded by pathExcluder.
+func IsValidFileInFilesystem(ctx context.Context, fs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder, file string) bool {
+ // return false if this path should be excluded
+ // pathExcluder can be nil; watch out for that
+ if pathExcluder != nil && pathExcluder.ShouldExcludePath(file) {
+ return false
+ }
+
+ // If the content type is valid for this path, err == nil => return true
+ _, err := contentTyper.ContentTypeForPath(ctx, fs, file)
+ return err == nil
+}
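+
+// A usage sketch (assuming an OS-backed Filesystem and this package's defaults):
+//
+//	fs := NewOSFilesystem("/var/lib/gitops/repo")
+//	typer := StaticContentTyper{ContentType: content.ContentTypeJSON}
+//	files, err := ListValidFilesInFilesystem(ctx, fs, typer, DefaultPathExcluders())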
diff --git a/pkg/storage/filesystem/fileevents/events.go b/pkg/storage/filesystem/fileevents/events.go
new file mode 100644
index 00000000..09822c4a
--- /dev/null
+++ b/pkg/storage/filesystem/fileevents/events.go
@@ -0,0 +1,41 @@
+package fileevents
+
+// FileEventType is an enum describing a change in a file's state
+type FileEventType byte
+
+const (
+ FileEventNone FileEventType = iota // 0
+ FileEventModify // 1
+ FileEventDelete // 2
+ FileEventMove // 3
+)
+
+func (e FileEventType) String() string {
+ switch e {
+ case 0:
+ return "NONE"
+ case 1:
+ return "MODIFY"
+ case 2:
+ return "DELETE"
+ case 3:
+ return "MOVE"
+ }
+
+ return "UNKNOWN"
+}
+
+// FileEvent describes a file change of a certain kind at a certain
+// (relative) path. Often emitted by FileEventsEmitter.
+type FileEvent struct {
+ // TODO: Include some kind of commit.Hash here that is optional?
+ // TODO: Make this an interface?
+ Path string
+ Type FileEventType
+
+ // OldPath is non-empty only when Type == FileEventMove.
+ OldPath string
+}
+
+// FileEventStream is a channel of FileEvents
+type FileEventStream chan *FileEvent
diff --git a/pkg/storage/filesystem/fileevents/inotify/filewatcher.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go
new file mode 100644
index 00000000..a66f1467
--- /dev/null
+++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go
@@ -0,0 +1,434 @@
+package inotify
+
+import (
+ "context"
+ "fmt"
+ "path/filepath"
+ gosync "sync"
+ "time"
+
+ "github.com/rjeczalik/notify"
+ "github.com/sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents"
+ "github.com/weaveworks/libgitops/pkg/util/sync"
+ "golang.org/x/sys/unix"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+var listenEvents = []notify.Event{notify.InDelete, notify.InCloseWrite, notify.InMovedFrom, notify.InMovedTo}
+
+var eventMap = map[notify.Event]fileevents.FileEventType{
+ notify.InDelete: fileevents.FileEventDelete,
+ notify.InCloseWrite: fileevents.FileEventModify,
+}
+
+// combinedEvents describes the event combinations to concatenate;
+// it is iterated in order, so the longest matches should be first
+var combinedEvents = []combinedEvent{
+ // DELETE + MODIFY => MODIFY
+ {[]notify.Event{notify.InDelete, notify.InCloseWrite}, 1},
+ // MODIFY + DELETE => NONE
+ {[]notify.Event{notify.InCloseWrite, notify.InDelete}, -1},
+ // MOVE + MODIFY => MOVE
+ {[]notify.Event{notify.InMovedTo, notify.InCloseWrite}, 0},
+ // MODIFY + MOVE => MOVE
+ {[]notify.Event{notify.InCloseWrite, notify.InMovedTo}, 1},
+}
+
+type notifyEvents []notify.EventInfo
+type eventStream chan notify.EventInfo
+
+// FileEvents is a slice of FileEvent pointers
+type FileEvents []*fileevents.FileEvent
+
+// NewFileWatcher returns a new fileevents.Emitter that recursively watches
+// the given directory for file events, batching and concatenating related
+// inotify events before dispatching them.
+func NewFileWatcher(dir string, opts ...FileWatcherOption) (fileevents.Emitter, error) {
+ o := defaultOptions().ApplyOptions(opts)
+
+ w := &FileWatcher{
+ dir: dir,
+
+ inbound: make(eventStream, int(o.EventBufferSize)),
+ // outbound is set by WatchForFileEvents
+ outboundMu: &gosync.Mutex{},
+
+ suspendFiles: sets.NewString(),
+ suspendFilesMu: &gosync.Mutex{},
+
+ // monitor and dispatcher set by WatchForFileEvents, guarded by outboundMu
+
+ opts: *o,
+
+ batcher: sync.NewBatchWriter(o.BatchTimeout),
+ }
+
+ log.Tracef("FileWatcher: Starting recursive watch for %q", dir)
+ if err := notify.Watch(filepath.Join(dir, "..."), w.inbound, listenEvents...); err != nil {
+ notify.Stop(w.inbound)
+ return nil, err
+ }
+
+ return w, nil
+}
+
+var _ fileevents.Emitter = &FileWatcher{}
+
+// FileWatcher recursively monitors changes in files in the given directory
+// and sends out events based on their state changes. Only files not excluded
+// by the configured PathExcluder are monitored. The FileWatcher can be
+// suspended for a single event at a time, to prevent updates written by the
+// storage layer itself from causing a loop.
+type FileWatcher struct {
+ dir string
+ // channels
+ inbound eventStream
+ outbound fileevents.FileEventStream
+ outboundMu *gosync.Mutex
+ // new suspend logic
+ suspendFiles sets.String
+ suspendFilesMu *gosync.Mutex
+ // goroutines
+ monitor *sync.Monitor
+ dispatcher *sync.Monitor
+
+ // opts
+ opts FileWatcherOptions
+ // the batcher is used for properly sending many concurrent inotify events
+ // as a group, after a specified timeout. This fixes the issue of one single
+ // file operation being registered as many different inotify events
+ batcher *sync.BatchWriter
+}
+
+func (w *FileWatcher) WatchForFileEvents(ctx context.Context, into fileevents.FileEventStream) error {
+ w.outboundMu.Lock()
+ defer w.outboundMu.Unlock()
+ // We don't support more than one listener
+ // TODO: maybe support many listeners in the future?
+ if w.outbound != nil {
+ return fmt.Errorf("FileWatcher: not more than one watch supported: %w", fileevents.ErrTooManyWatches)
+ }
+ w.outbound = into
+ // Start the backing goroutines
+ w.monitor = sync.RunMonitor(w.monitorFunc)
+ w.dispatcher = sync.RunMonitor(w.dispatchFunc)
+ return nil // all ok
+}
+
+func (w *FileWatcher) monitorFunc() error {
+ log.Debug("FileWatcher: Monitoring thread started")
+ defer log.Debug("FileWatcher: Monitoring thread stopped")
+ defer close(w.outbound) // Close the update stream after the FileWatcher has stopped
+
+ for {
+ event, ok := <-w.inbound
+ if !ok {
+			logrus.Debug("FileWatcher: Got non-ok channel receive from w.inbound, exiting monitorFunc")
+ return nil
+ }
+
+ if ievent(event).Mask&unix.IN_ISDIR != 0 {
+ continue // Skip directories
+ }
+
+ // Get the relative path between the root directory and the changed file
+		// Note: This is only used for the PathExcluder; absolute paths are used
+		// in the underlying file-change computation, until sendUpdate converts
+		// them into relative paths before sending them to the listener.
+ relativePath, err := filepath.Rel(w.dir, event.Path())
+ if err != nil {
+ logrus.Errorf("FileWatcher: Error occurred when computing relative path between: %s and %s: %v", w.dir, event.Path(), err)
+ continue
+ }
+
+ // The PathExcluder only operates on relative paths.
+ if w.opts.PathExcluder.ShouldExcludePath(relativePath) {
+ continue // Skip ignored files
+ }
+
+ // Get any events registered for the specific file, and append the specified event
+ var eventList notifyEvents
+ if val, ok := w.batcher.Load(event.Path()); ok {
+ eventList = val.(notifyEvents)
+ }
+
+ eventList = append(eventList, event)
+
+ // Register the event in the map, and dispatch all the events at once after the timeout
+ // Note that event.Path() is just the unique key for the map here, it is not actually
+ // used later when computing the changes of the filesystem.
+ w.batcher.Store(event.Path(), eventList)
+ log.Debugf("FileWatcher: Registered inotify events %v for path %q", eventList, event.Path())
+ }
+}
+
+func (w *FileWatcher) dispatchFunc() error {
+ log.Debug("FileWatcher: Dispatch thread started")
+ defer log.Debug("FileWatcher: Dispatch thread stopped")
+
+ for {
+ // Wait until we have a batch dispatched to us
+ ok := w.batcher.ProcessBatch(func(_, val interface{}) bool {
+ // Concatenate all known events, and dispatch them to be handled one by one
+ for _, event := range w.concatenateEvents(val.(notifyEvents)) {
+ w.sendUpdate(event)
+ }
+
+ // Continue traversing the map
+ return true
+ })
+ if !ok {
+			logrus.Debug("FileWatcher: Got non-ok channel receive from w.batcher, exiting dispatchFunc")
+ return nil // The BatchWriter channel is closed, stop processing
+ }
+
+ log.Debug("FileWatcher: Dispatched events batch and reset the events cache")
+ }
+}
+
+func (w *FileWatcher) sendUpdate(event *fileevents.FileEvent) {
+ // Get the relative path between the root directory and the changed file
+ relativePath, err := filepath.Rel(w.dir, event.Path)
+ if err != nil {
+ logrus.Errorf("FileWatcher: Error occurred when computing relative path between: %s and %s: %v", w.dir, event.Path, err)
+ return
+ }
+ // Replace the full path with the relative path for the signaling upstream
+ event.Path = relativePath
+
+ if len(event.OldPath) != 0 {
+ // Do the same for event.OldPath
+ relativePath, err = filepath.Rel(w.dir, event.OldPath)
+ if err != nil {
+ logrus.Errorf("FileWatcher: Error occurred when computing relative path between: %s and %s: %v", w.dir, event.OldPath, err)
+ return
+ }
+ // Replace the full path with the relative path for the signaling upstream
+ event.OldPath = relativePath
+ }
+
+ if w.shouldSuspendEvent(event.Path) {
+ log.Debugf("FileWatcher: Skipping suspended event %s for path: %q", event.Type, event.Path)
+ return // Skip the suspended event
+ }
+ if event.Type == fileevents.FileEventMove {
+ log.Debugf("FileWatcher: Sending update: %s: %q -> %q", event.Type, event.OldPath, event.Path)
+ } else {
+ log.Debugf("FileWatcher: Sending update: %s -> %q", event.Type, event.Path)
+ }
+
+ w.outbound <- event
+}
+
+// Close closes active underlying resources
+func (w *FileWatcher) Close() error {
+ notify.Stop(w.inbound)
+ w.batcher.Close()
+ close(w.inbound) // Close the inbound event stream
+ // No need to check the error here, as we only return nil above
+ _ = w.monitor.Wait()
+ _ = w.dispatcher.Wait()
+ return nil
+}
+
+// Suspend enables a one-time suspend for any event from the given path.
+// The path must be relative to the root directory, i.e. computed as
+// path, _ = filepath.Rel(<rootDir>, <absolutePath>).
+func (w *FileWatcher) Suspend(_ context.Context, path string) {
+ w.suspendFilesMu.Lock()
+ defer w.suspendFilesMu.Unlock()
+ w.suspendFiles.Insert(path)
+}
+
+// shouldSuspendEvent checks if an event for the given path
+// should be suspended for one time. If it should, true will
+// be returned, and the mapping will be removed next time.
+func (w *FileWatcher) shouldSuspendEvent(path string) bool {
+ w.suspendFilesMu.Lock()
+ defer w.suspendFilesMu.Unlock()
+ // If the path should not be suspended, just return false and be done
+ if !w.suspendFiles.Has(path) {
+ return false
+ }
+ // Otherwise, remove it from the list and mark it as suspended
+ w.suspendFiles.Delete(path)
+ return true
+}
+
+func convertEvent(event notify.Event) fileevents.FileEventType {
+ if updateEvent, ok := eventMap[event]; ok {
+ return updateEvent
+ }
+
+ return fileevents.FileEventNone
+}
+
+func convertUpdate(event notify.EventInfo) *fileevents.FileEvent {
+ fileEvent := convertEvent(event.Event())
+ if fileEvent == fileevents.FileEventNone {
+ // This should never happen
+ panic(fmt.Sprintf("invalid event for update conversion: %q", event.Event().String()))
+ }
+
+ return &fileevents.FileEvent{
+ Path: event.Path(),
+ Type: fileEvent,
+ }
+}
+
+// moveCache caches an event during a move operation
+// and dispatches a FileUpdate if it's not cancelled
+type moveCache struct {
+ watcher *FileWatcher
+ event notify.EventInfo
+ timer *time.Timer
+}
+
+func (w *FileWatcher) newMoveCache(event notify.EventInfo) *moveCache {
+ m := &moveCache{
+ watcher: w,
+ event: event,
+ }
+
+	// moveCaches wait for the batch timeout to be cancelled before firing
+ m.timer = time.AfterFunc(w.opts.BatchTimeout, m.incomplete)
+ return m
+}
+
+func (m *moveCache) cookie() uint32 {
+ return ievent(m.event).Cookie
+}
+
+// If the moveCache isn't cancelled, the move is considered incomplete and this
+// method is fired. A complete move consists of a "from" event and a "to" event;
+// if only one is received, the file was moved into or out of a watched directory,
+// which is treated as a normal creation/deletion by this method.
+func (m *moveCache) incomplete() {
+ var evType fileevents.FileEventType
+
+ switch m.event.Event() {
+ case notify.InMovedFrom:
+ evType = fileevents.FileEventDelete
+ case notify.InMovedTo:
+ evType = fileevents.FileEventModify
+ default:
+ // This should never happen
+ panic(fmt.Sprintf("moveCache: unrecognized event: %v", m.event.Event()))
+ }
+
+ log.Tracef("moveCache: Timer expired for %d, dispatching...", m.cookie())
+ m.watcher.sendUpdate(&fileevents.FileEvent{Path: m.event.Path(), Type: evType})
+
+ // Delete the cache after the timer has fired
+ moveCachesMu.Lock()
+ delete(moveCaches, m.cookie())
+ moveCachesMu.Unlock()
+}
+
+func (m *moveCache) cancel() {
+ m.timer.Stop()
+ moveCachesMu.Lock()
+ delete(moveCaches, m.cookie())
+ moveCachesMu.Unlock()
+ log.Tracef("moveCache: Dispatching cancelled for %d", m.cookie())
+}
+
+var (
+ // moveCaches keeps track of active moves by cookie
+ moveCaches = make(map[uint32]*moveCache)
+ moveCachesMu = &gosync.RWMutex{}
+)
+
+// move processes InMovedFrom and InMovedTo events in any order
+// and dispatches FileUpdates when a move is detected
+func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *fileevents.FileEvent) {
+ cookie := ievent(event).Cookie
+ moveCachesMu.RLock()
+ cache, ok := moveCaches[cookie]
+ moveCachesMu.RUnlock()
+ if !ok {
+ // The cookie is not cached, create a new cache object for it
+ moveCachesMu.Lock()
+ moveCaches[cookie] = w.newMoveCache(event)
+ moveCachesMu.Unlock()
+ return
+ }
+
+ sourcePath, destPath := cache.event.Path(), event.Path()
+ switch event.Event() {
+ case notify.InMovedFrom:
+ sourcePath, destPath = destPath, sourcePath
+ fallthrough
+ case notify.InMovedTo:
+ cache.cancel() // Cancel dispatching the cache's incomplete move
+ moveUpdate = &fileevents.FileEvent{Path: destPath, OldPath: sourcePath, Type: fileevents.FileEventMove} // Register an internal, complete move instead
+ log.Tracef("FileWatcher: Detected move: %q -> %q", sourcePath, destPath)
+ }
+
+ return
+}
+
+// concatenateEvents takes in a slice of events and concatenates
+// all events possible based on combinedEvents. It also manages
+// file moving and conversion from notifyEvents to FileEvents
+func (w *FileWatcher) concatenateEvents(events notifyEvents) FileEvents {
+ for _, combinedEvent := range combinedEvents {
+ // Test if the prefix of the given events matches combinedEvent.input
+ if event, ok := combinedEvent.match(events); ok {
+ // If so, replace combinedEvent.input prefix in events with combinedEvent.output and recurse
+ concatenated := events[len(combinedEvent.input):]
+ if event != nil { // Prepend the concatenation result event if any
+ concatenated = append(notifyEvents{event}, concatenated...)
+ }
+
+ log.Tracef("FileWatcher: Concatenated events: %v -> %v", events, concatenated)
+ return w.concatenateEvents(concatenated)
+ }
+ }
+
+ // Convert the events to updates
+ updates := make(FileEvents, 0, len(events))
+ for _, event := range events {
+ switch event.Event() {
+ case notify.InMovedFrom, notify.InMovedTo:
+ // Send move-related events to w.move
+ if update := w.move(event); update != nil {
+ // Add the update to the list if we get something back
+ updates = append(updates, update)
+ }
+ default:
+ updates = append(updates, convertUpdate(event))
+ }
+ }
+
+ return updates
+}
+
+func ievent(event notify.EventInfo) *unix.InotifyEvent {
+ return event.Sys().(*unix.InotifyEvent)
+}
+
+// combinedEvent describes multiple events that should be concatenated into a single event
+type combinedEvent struct {
+	input  []notify.Event // input is the ordered slice of events to match
+	output int            // output is the index of the event to return; negative values mean no event (nil)
+}
+
+func (c *combinedEvent) match(events notifyEvents) (notify.EventInfo, bool) {
+ if len(c.input) > len(events) {
+ return nil, false // Not enough events, cannot match
+ }
+
+ for i := 0; i < len(c.input); i++ {
+ if events[i].Event() != c.input[i] {
+ return nil, false
+ }
+ }
+
+ if c.output >= 0 {
+ return events[c.output], true
+ }
+
+ return nil, true
+}
diff --git a/pkg/util/watcher/filewatcher_test.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go
similarity index 57%
rename from pkg/util/watcher/filewatcher_test.go
rename to pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go
index b80f9b26..cb1105d9 100644
--- a/pkg/util/watcher/filewatcher_test.go
+++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go
@@ -1,9 +1,12 @@
-package watcher
+package inotify
import (
+ "fmt"
+ "strings"
"testing"
"github.com/rjeczalik/notify"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents"
"golang.org/x/sys/unix"
)
@@ -51,33 +54,33 @@ var testEvents = []notifyEvents{
},
}
-var targets = []FileEvents{
+var targets = []FileEventTypes{
{
- FileEventModify,
+ fileevents.FileEventModify,
},
{
- FileEventDelete,
+ fileevents.FileEventDelete,
},
{
- FileEventModify,
- FileEventMove,
- FileEventDelete,
+ fileevents.FileEventModify,
+ fileevents.FileEventMove,
+ fileevents.FileEventDelete,
},
{
- FileEventModify,
+ fileevents.FileEventModify,
},
{},
}
-func extractEvents(updates FileUpdates) (events FileEvents) {
- for _, update := range updates {
- events = append(events, update.Event)
+func extractEventTypes(events FileEvents) (eventTypes FileEventTypes) {
+ for _, event := range events {
+ eventTypes = append(eventTypes, event.Type)
}
return
}
-func eventsEqual(a, b FileEvents) bool {
+func eventsEqual(a, b FileEventTypes) bool {
if len(a) != len(b) {
return false
}
@@ -91,9 +94,29 @@ func eventsEqual(a, b FileEvents) bool {
return true
}
+// FileEventTypes is a slice of FileEventType
+type FileEventTypes []fileevents.FileEventType
+
+var _ fmt.Stringer = FileEventTypes{}
+
+func (e FileEventTypes) String() string {
+ strs := make([]string, 0, len(e))
+ for _, ev := range e {
+ strs = append(strs, ev.String())
+ }
+
+ return strings.Join(strs, ",")
+}
+
func TestEventConcatenation(t *testing.T) {
+ // TODO: Needs fixing
+ tmp := t.TempDir()
+ fw, err := NewFileWatcher(tmp)
+ if err != nil {
+ t.Fatal(err)
+ }
for i, e := range testEvents {
- result := extractEvents((&FileWatcher{}).concatenateEvents(e))
+ result := extractEventTypes(fw.(*FileWatcher).concatenateEvents(e))
if !eventsEqual(result, targets[i]) {
t.Errorf("wrong concatenation result: %v != %v", result, targets[i])
}
diff --git a/pkg/storage/filesystem/fileevents/inotify/options.go b/pkg/storage/filesystem/fileevents/inotify/options.go
new file mode 100644
index 00000000..2c48e5dc
--- /dev/null
+++ b/pkg/storage/filesystem/fileevents/inotify/options.go
@@ -0,0 +1,59 @@
+package inotify
+
+import (
+ "time"
+
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
+)
+
+// How many inotify events we can buffer before watching is interrupted
+const DefaultEventBufferSize int32 = 4096
+
+type FileWatcherOption interface {
+ ApplyToFileWatcher(*FileWatcherOptions)
+}
+
+var _ FileWatcherOption = &FileWatcherOptions{}
+
+// FileWatcherOptions specifies options for the FileWatcher
+type FileWatcherOptions struct {
+ // BatchTimeout specifies the duration to wait after last event
+ // before dispatching grouped inotify events
+ // Default: 1s
+ BatchTimeout time.Duration
+ // EventBufferSize describes how many inotify events can be buffered
+ // before watching is interrupted/delayed.
+ // Default: DefaultEventBufferSize
+ EventBufferSize int32
+ // PathExcluder provides a way to exclude paths.
+ // Default: filesystem.DefaultPathExcluders()
+ PathExcluder filesystem.PathExcluder
+}
+
+func (o *FileWatcherOptions) ApplyToFileWatcher(target *FileWatcherOptions) {
+ if o.BatchTimeout != 0 {
+ target.BatchTimeout = o.BatchTimeout
+ }
+ if o.EventBufferSize != 0 {
+ target.EventBufferSize = o.EventBufferSize
+ }
+ if o.PathExcluder != nil {
+ target.PathExcluder = o.PathExcluder
+ }
+}
+
+func (o *FileWatcherOptions) ApplyOptions(opts []FileWatcherOption) *FileWatcherOptions {
+ for _, opt := range opts {
+ opt.ApplyToFileWatcher(o)
+ }
+ return o
+}
+
+// defaultOptions returns the default options
+func defaultOptions() *FileWatcherOptions {
+ return &FileWatcherOptions{
+ BatchTimeout: 1 * time.Second,
+ EventBufferSize: DefaultEventBufferSize,
+ PathExcluder: filesystem.DefaultPathExcluders(),
+ }
+}
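+
+// A construction sketch (the directory and values are illustrative):
+//
+//	w, err := NewFileWatcher("/var/lib/gitops/repo", &FileWatcherOptions{
+//		BatchTimeout:    2 * time.Second,
+//		EventBufferSize: 8192,
+//	})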
diff --git a/pkg/storage/filesystem/fileevents/interfaces.go b/pkg/storage/filesystem/fileevents/interfaces.go
new file mode 100644
index 00000000..ea566f2d
--- /dev/null
+++ b/pkg/storage/filesystem/fileevents/interfaces.go
@@ -0,0 +1,52 @@
+package fileevents
+
+import (
+ "context"
+ "errors"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/storage/event"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
+)
+
+var (
+ // ErrTooManyWatches can happen when trying to register too many
+	// watching receiver channels to an event emitter.
+ ErrTooManyWatches = errors.New("too many watches already opened")
+)
+
+// Emitter is an interface that provides high-level inotify-like
+// behaviour to consumers. It can be used e.g. by even higher-level
+// interfaces like FilesystemEventStorage.
+type Emitter interface {
+ // WatchForFileEvents starts feeding FileEvents into the given "into"
+ // channel. The caller is responsible for setting a channel buffering
+ // limit large enough to not block normal operation. An error might
+ // be returned if a maximum amount of watches has been opened already,
+	// be returned if the maximum number of watches has already been opened,
+ //
+ // Note that it is the receiver's responsibility to "validate" the
+	// file so it matches any user-defined policy (e.g. only specific
+ // content types, or a PathExcluder has been given).
+ WatchForFileEvents(ctx context.Context, into FileEventStream) error
+
+ // Suspend blocks the next event dispatch for this given path. Useful
+ // for not sending "your own" modification events into the
+ // FileEventStream that is listening. path is relative.
+ // TODO: Should this be handled at this level, or should the "figure out
+ // what is my own changes" be handled at higher levels in the stack?
+ Suspend(ctx context.Context, path string)
+
+ // Close closes the emitter gracefully.
+ io.Closer
+}
+
+// Storage is the union of a filesystem.Storage and an event.Storage,
+// plus the possibility to listen for object updates from an Emitter.
+type Storage interface {
+ filesystem.Storage
+ event.Storage
+
+ // FileEventsEmitter gets the Emitter used internally.
+ FileEventsEmitter() Emitter
+}
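+
+// A consumption sketch (the buffer size is illustrative):
+//
+//	events := make(FileEventStream, 4096)
+//	if err := emitter.WatchForFileEvents(ctx, events); err != nil {
+//		return err
+//	}
+//	for ev := range events {
+//		fmt.Println(ev.Type, ev.Path)
+//	}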
diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go
new file mode 100644
index 00000000..b1a2b167
--- /dev/null
+++ b/pkg/storage/filesystem/filefinder_simple.go
@@ -0,0 +1,274 @@
+package filesystem
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/storage"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// NewSimpleStorage is a default opinionated constructor for a Storage
+// using SimpleFileFinder as the FileFinder, and the local disk as target.
+// If you need more advanced customizability than provided here, you can compose
+// the call to filesystem.NewGeneric yourself.
+func NewSimpleStorage(dir string, namespacer storage.Namespacer, opts SimpleFileFinderOptions) (Storage, error) {
+ fs := NewOSFilesystem(dir)
+ fileFinder, err := NewSimpleFileFinder(fs, opts)
+ if err != nil {
+ return nil, err
+ }
+ // fileFinder and namespacer are validated by filesystem.NewGeneric.
+ return NewGeneric(fileFinder, namespacer)
+}
+
+func NewSimpleFileFinder(fsimpl Filesystem, opts SimpleFileFinderOptions) (*SimpleFileFinder, error) {
+ if fsimpl == nil {
+ return nil, fmt.Errorf("NewSimpleFileFinder: fsimpl is mandatory")
+ }
+ ct := content.ContentTypeJSON
+ if len(opts.ContentType) != 0 {
+ ct = opts.ContentType
+ }
+ resolver := DefaultFileExtensionResolver
+ if opts.FileExtensionResolver != nil {
+ resolver = opts.FileExtensionResolver
+ }
+ return &SimpleFileFinder{
+ fsimpl: fsimpl,
+ opts: opts,
+ contentTyper: StaticContentTyper{ContentType: ct},
+ resolver: resolver,
+ }, nil
+}
+
+// isObjectIDNamespaced returns true if the ID is of a namespaced GroupKind, and
+// false if the GroupKind is non-namespaced. NOTE: This ONLY works for FileFinders
+// where the Storage has made sure that the namespacing conventions are followed.
+func isObjectIDNamespaced(id core.UnversionedObjectID) bool {
+ return id.ObjectKey().Namespace != ""
+}
+
+var _ FileFinder = &SimpleFileFinder{}
+
+// SimpleFileFinder is a FileFinder-compliant implementation that
+// stores Objects on disk using a straightforward directory layout.
+//
+// The following directory layout is used:
+// if DisableGroupDirectory == false && SubDirectoryFileName == "" {
+//	/<group>/<kind>/<namespace>/<name>.<ext> if namespaced or
+//	/<group>/<kind>/<name>.<ext> if non-namespaced
+// }
+// else if DisableGroupDirectory == false && SubDirectoryFileName == "foo" {
+//	/<group>/<kind>/<namespace>/<name>/foo.<ext> if namespaced or
+//	/<group>/<kind>/<name>/foo.<ext> if non-namespaced
+// }
+// else if DisableGroupDirectory == true && SubDirectoryFileName == "" {
+//	/<kind>/<namespace>/<name>.<ext> if namespaced or
+//	/<kind>/<name>.<ext> if non-namespaced
+// }
+// else if DisableGroupDirectory == true && SubDirectoryFileName == "foo" {
+//	/<kind>/<namespace>/<name>/foo.<ext> if namespaced or
+//	/<kind>/<name>/foo.<ext> if non-namespaced
+// }
+//
+// <ext> is resolved by the FileExtensionResolver, for the given ContentType.
+// If <group> is an empty string (as when "apiVersion: v1" is used), <group> will
+// be set to "core".
+//
+// This FileFinder does not support the ObjectsAt method.
+type SimpleFileFinder struct {
+ fsimpl Filesystem
+ opts SimpleFileFinderOptions
+ contentTyper StaticContentTyper
+ resolver FileExtensionResolver
+}
+
+type SimpleFileFinderOptions struct {
+ // Default: false; means enable group directory
+ DisableGroupDirectory bool
+ // Default: ""; means use file names as the means of storage
+ SubDirectoryFileName string
+ // Default: content.ContentTypeJSON
+ ContentType content.ContentType
+ // Default: DefaultFileExtensionResolver
+ FileExtensionResolver FileExtensionResolver
+}
+
+func (f *SimpleFileFinder) Filesystem() Filesystem {
+ return f.fsimpl
+}
+
+func (f *SimpleFileFinder) ContentTyper() ContentTyper {
+ return f.contentTyper
+}
+
+// ObjectPath gets the file path relative to the root directory
+func (f *SimpleFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) {
+	// <group>/<kind>/
+ paths := []string{f.kindKeyPath(id.GroupKind())}
+
+ if isObjectIDNamespaced(id) {
+		// ./<namespace>/
+ paths = append(paths, id.ObjectKey().Namespace)
+ }
+ // Get the file extension
+ ext, err := f.ext()
+ if err != nil {
+ return "", err
+ }
+ if f.opts.SubDirectoryFileName == "" {
+		// ./<name>.<ext>
+ paths = append(paths, id.ObjectKey().Name+ext)
+ } else {
+		// ./<name>/<SubDirectoryFileName>.<ext>
+ paths = append(paths, id.ObjectKey().Name, f.opts.SubDirectoryFileName+ext)
+ }
+ return filepath.Join(paths...), nil
+}
+
+func (f *SimpleFileFinder) kindKeyPath(gk core.GroupKind) string {
+ if f.opts.DisableGroupDirectory {
+		// ./<kind>/
+ return filepath.Join(gk.Kind)
+ }
+ // Fall back to the "core/v1" storage path for "apiVersion: v1"
+ group := gk.Group
+ if len(group) == 0 {
+ group = "core"
+ }
+	// ./<group>/<kind>/
+ return filepath.Join(group, gk.Kind)
+}
+
+// ObjectsAt retrieves the ObjectIDs in the file with the given relative file path.
+func (f *SimpleFileFinder) ObjectsAt(ctx context.Context, path string) (core.UnversionedObjectIDSet, error) {
+ return nil, core.ErrNotImplemented
+}
+
+func (f *SimpleFileFinder) ext() (string, error) {
+ return f.resolver.ExtensionForContentType(f.contentTyper.ContentType)
+}
+
+// ListGroupKinds returns all known GroupKinds by the implementation at that
+// time. The set might vary over time as data is created and deleted, and
+// should not be treated as a universal "what types could possibly exist",
+// but more generally, "what are the GroupKinds of the objects that currently
+// exist"? However, obviously, specific implementations might honor this
+// guideline differently. This might be used for introspection into the system.
+func (f *SimpleFileFinder) ListGroupKinds(ctx context.Context) ([]core.GroupKind, error) {
+ if f.opts.DisableGroupDirectory {
+ return nil, fmt.Errorf("cannot resolve GroupKinds when group directories are disabled: %w", core.ErrInvalidParameter)
+ }
+
+ // List groups at top-level
+ ctxFs := f.fsimpl.WithContext(ctx)
+ groups, err := readDir(ctxFs, "")
+ if err != nil {
+ return nil, err
+ }
+ // For all groups; also list all kinds, and add to the following list
+ groupKinds := []core.GroupKind{}
+ for _, group := range groups {
+ kinds, err := readDir(ctxFs, group)
+ if err != nil {
+ return nil, err
+ }
+ for _, kind := range kinds {
+ groupKinds = append(groupKinds, core.GroupKind{Group: group, Kind: kind})
+ }
+ }
+ return groupKinds, nil
+}
+
+// ListNamespaces lists the available namespaces for the given GroupKind.
+// This function shall only be called for namespaced objects; it is up to
+// the caller to make sure they do not call this method for root-spaced
+// objects. If any of the given rules are violated, ErrNamespacedMismatch
+// should be returned as a wrapped error.
+//
+// The implementer can choose between basing the answer strictly on e.g.
+// v1.Namespace objects that exist in the system, or just the set of
+// different namespaces that have been set on any object belonging to
+// the given GroupKind.
+func (f *SimpleFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) {
+ ctxFs := f.fsimpl.WithContext(ctx)
+ entries, err := readDir(ctxFs, f.kindKeyPath(gk))
+ if err != nil {
+ return nil, err
+ }
+ return sets.NewString(entries...), nil
+}
+
+// ListObjectIDs returns a list of unversioned ObjectIDs.
+// For namespaced GroupKinds, the caller must provide a namespace, and for
+// root-spaced GroupKinds, the caller must not. When namespaced, this function
+// must only return object IDs for that given namespace. If any of the given
+// rules are violated, ErrNamespacedMismatch should be returned as a wrapped error.
+func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) {
+	// If namespace is empty, the names will be in ./<kindpath>, otherwise ./<kindpath>/<namespace>
+ namesDir := filepath.Join(f.kindKeyPath(gk), namespace)
+ ctxFs := f.fsimpl.WithContext(ctx)
+ entries, err := readDir(ctxFs, namesDir)
+ if err != nil {
+ return nil, err
+ }
+ // Get the file extension
+ ext, err := f.ext()
+ if err != nil {
+ return nil, err
+ }
+ // Map the names to UnversionedObjectIDs. We already know how many entries.
+ ids := core.NewUnversionedObjectIDSetSized(len(entries))
+ for _, entry := range entries {
+ // Loop through all entries, and make sure they are sanitized .metadata.name's
+ if f.opts.SubDirectoryFileName != "" {
+ // If f.SubDirectoryFileName != "", the file names already match .metadata.name
+			// Make sure the metadata file ./<.metadata.name>/<SubDirectoryFileName>.<ext> actually exists
+ expectedPath := filepath.Join(namesDir, entry, f.opts.SubDirectoryFileName+ext)
+
+ if exists, _ := Exists(ctxFs, expectedPath); !exists {
+ continue
+ }
+ } else {
+			// Storage path is ./<name>.<ext>, so entry is "<name>.<ext>".
+ // Verify the extension is there and strip it from name. If ext isn't there, just continue
+ if !strings.HasSuffix(entry, ext) {
+ continue
+ }
+ // Remove the extension from the name
+ entry = strings.TrimSuffix(entry, ext)
+ }
+ // If we got this far, add the key to the list
+ ids.Insert(core.NewUnversionedObjectID(gk, core.ObjectKey{Name: entry, Namespace: namespace}))
+ }
+ return ids, nil
+}
+
+func readDir(ctxFs FS, dir string) ([]string, error) {
+	fi, err := ctxFs.Stat(dir)
+	if errors.Is(err, os.ErrNotExist) {
+		// It's ok if the directory doesn't exist (yet); we just don't have any items then :)
+		return nil, nil
+	} else if err != nil {
+		// Propagate any other stat error; fi would be nil below
+		return nil, err
+	} else if !fi.IsDir() {
+		// Unexpected: the given path exists, but is a file, not a directory
+		return nil, fmt.Errorf("expected that %s is a directory", dir)
+	}
+
+ // When we know that path is a directory, go ahead and read it
+ entries, err := ctxFs.ReadDir(dir)
+ if err != nil {
+ return nil, err
+ }
+ fileNames := make([]string, 0, len(entries))
+ for _, entry := range entries {
+ fileNames = append(fileNames, entry.Name())
+ }
+ return fileNames, nil
+}
diff --git a/pkg/storage/filesystem/filesystem.go b/pkg/storage/filesystem/filesystem.go
new file mode 100644
index 00000000..f0ecd0c8
--- /dev/null
+++ b/pkg/storage/filesystem/filesystem.go
@@ -0,0 +1,164 @@
+package filesystem
+
+import (
+ "context"
+ "errors"
+ "io/fs"
+ "os"
+ "strconv"
+
+ "github.com/spf13/afero"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+)
+
+type Filesystem interface {
+ WithContext(ctx context.Context) FS
+ RefResolver() commit.RefResolver
+}
+
+type FS interface {
+ fs.StatFS
+ fs.ReadDirFS
+ fs.ReadFileFS
+
+ // MkdirAll creates a directory path, along with any parents that do
+ // not exist yet.
+ MkdirAll(path string, perm os.FileMode) error
+ // Remove removes a file identified by name, returning an error, if any
+ // happens.
+ Remove(name string) error
+
+ WriteFile(filename string, data []byte, perm os.FileMode) error
+
+ // Custom methods
+
+ // Checksum returns a checksum of the given file.
+ //
+ // What the checksum is is application-dependent; however, it
+ // should be the same across two invocations, as long as the stored
+ // data is the same. It might also change over time even though the
+ // underlying data did not. Examples of checksums that can be
+ // used are: the file modification timestamp, a sha256sum of the
+ // file content, or the latest Git commit when the file was
+ // changed.
+ //
+ // Like Stat(filename), os.ErrNotExist is returned if the file does
+ // not exist, such that errors.Is(err, os.ErrNotExist) can be used
+ // to check.
+ Checksum(filename string) (string, error)
+
+ // RootDirectory specifies where on disk the root directory is stored.
+ // This path MUST be absolute. All other paths for the other methods
+ // MUST be relative to this directory.
+ //RootDirectory() (string, error)
+}
+
+type ContextFS interface {
+ Open(ctx context.Context, name string) (fs.File, error)
+ Stat(ctx context.Context, name string) (fs.FileInfo, error)
+ ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error)
+ ReadFile(ctx context.Context, name string) ([]byte, error)
+ MkdirAll(ctx context.Context, path string, perm os.FileMode) error
+ Remove(ctx context.Context, name string) error
+ WriteFile(ctx context.Context, filename string, data []byte, perm os.FileMode) error
+ Checksum(ctx context.Context, filename string) (string, error)
+ //RootDirectory(ctx context.Context) (string, error)
+}
+
+// Exists uses the ctxFs.Stat() method to check whether the file exists.
+// If os.ErrNotExist is returned from the stat call, the return value is
+// false, nil. If another error occurred, then false, err is returned.
+// If err == nil, then true, nil is returned.
+func Exists(ctxFs FS, name string) (bool, error) {
+ _, err := ctxFs.Stat(name)
+ if errors.Is(err, os.ErrNotExist) {
+ return false, nil
+ }
+ if err != nil {
+ return false, err
+ }
+ return true, nil
+}
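+
+// Usage sketch (illustrative; the path is hypothetical):
+//
+//   fsys := fs.WithContext(ctx) // fs is a Filesystem
+//   if ok, err := Exists(fsys, "cars/foo.yaml"); err == nil && ok {
+//       data, _ := fsys.ReadFile("cars/foo.yaml")
+//       _ = data
+//   }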
+
+func FromContext(ctxFs ContextFS) Filesystem {
+ return &fromCtxFs{ctxFs}
+}
+
+type fromCtxFs struct {
+ ctxFs ContextFS
+}
+
+func (f *fromCtxFs) WithContext(ctx context.Context) FS {
+ return &fromCtxFsMapper{f, ctx}
+}
+
+// RefResolver is required by the Filesystem interface; a plain ContextFS has
+// no commit/ref knowledge, so nil is returned here.
+func (f *fromCtxFs) RefResolver() commit.RefResolver { return nil }
+
+type fromCtxFsMapper struct {
+ *fromCtxFs
+ ctx context.Context
+}
+
+func (f *fromCtxFsMapper) Open(name string) (fs.File, error) {
+ return f.ctxFs.Open(f.ctx, name)
+}
+func (f *fromCtxFsMapper) Stat(name string) (fs.FileInfo, error) {
+ return f.ctxFs.Stat(f.ctx, name)
+}
+func (f *fromCtxFsMapper) ReadDir(name string) ([]fs.DirEntry, error) {
+ return f.ctxFs.ReadDir(f.ctx, name)
+}
+func (f *fromCtxFsMapper) ReadFile(name string) ([]byte, error) {
+ return f.ctxFs.ReadFile(f.ctx, name)
+}
+func (f *fromCtxFsMapper) MkdirAll(path string, perm os.FileMode) error {
+ return f.ctxFs.MkdirAll(f.ctx, path, perm)
+}
+func (f *fromCtxFsMapper) Remove(name string) error {
+ return f.ctxFs.Remove(f.ctx, name)
+}
+func (f *fromCtxFsMapper) WriteFile(filename string, data []byte, perm os.FileMode) error {
+ return f.ctxFs.WriteFile(f.ctx, filename, data, perm)
+}
+func (f *fromCtxFsMapper) Checksum(filename string) (string, error) {
+ return f.ctxFs.Checksum(f.ctx, filename)
+}
+
+// NewOSFilesystem creates a Filesystem for the given local directory, backed
+// by an afero.OsFs scoped at rootDir, using FilesystemFromAfero underneath.
+func NewOSFilesystem(rootDir string) Filesystem {
+ // TODO: rootDir validation? It must be absolute, exist, and be a directory.
+ return FilesystemFromAfero(afero.NewBasePathFs(afero.NewOsFs(), rootDir))
+}
+
+// FilesystemFromAfero wraps an underlying afero.Fs without context knowledge
+// in a Filesystem-compliant implementation. The given fs should already be
+// scoped at the desired root directory (e.g. wrapped in
+// afero.NewBasePathFs(fs, rootDir), as NewOSFilesystem does).
+//
+// Checksum is calculated based on the modification timestamp of the file.
+func FilesystemFromAfero(fs afero.Fs) Filesystem {
+ return &nopCtx{&filesystem{afero.NewIOFS(fs)}}
+}
+
+type nopCtx struct {
+ fs FS
+}
+
+func (c *nopCtx) WithContext(context.Context) FS { return c.fs }
+
+// RefResolver is required by the Filesystem interface; a plain afero-backed
+// filesystem has no commit/ref knowledge, so nil is returned here.
+func (c *nopCtx) RefResolver() commit.RefResolver { return nil }
+
+type filesystem struct {
+ afero.IOFS
+}
+
+func (f *filesystem) WriteFile(filename string, data []byte, perm os.FileMode) error {
+ return afero.WriteFile(f.IOFS.Fs, filename, data, perm)
+}
+func (f *filesystem) Checksum(filename string) (string, error) {
+ fi, err := f.Stat(filename)
+ if err != nil {
+ return "", err
+ }
+ return checksumFromFileInfo(fi), nil
+}
+
+func checksumFromFileInfo(fi os.FileInfo) string {
+ return strconv.FormatInt(fi.ModTime().UnixNano(), 10)
+}
diff --git a/pkg/storage/filesystem/format.go b/pkg/storage/filesystem/format.go
new file mode 100644
index 00000000..95913500
--- /dev/null
+++ b/pkg/storage/filesystem/format.go
@@ -0,0 +1,92 @@
+package filesystem
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "path/filepath"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+)
+
+var (
+ ErrCannotDetermineContentType = errors.New("cannot determine content type")
+ ErrUnrecognizedContentType = errors.New("unrecognized content type")
+)
+
+// ContentTyper resolves the content type of a file, given its path and the
+// filesystem abstraction, so that it is possible to even examine the file itself
+// if needed for making the judgement. See DefaultContentTyper for a sample implementation.
+type ContentTyper interface {
+ // ContentTypeForPath should return the content type for the file that exists in
+ // the given Filesystem (path is relative). If the content type cannot be determined
+ // please return a wrapped ErrCannotDetermineContentType error.
+ ContentTypeForPath(ctx context.Context, fs Filesystem, path string) (content.ContentType, error)
+}
+
+// DefaultContentTyper describes the default mapping between
+// file extensions and content types.
+var DefaultContentTyper ContentTyper = ContentTypeForExtension{
+ ".json": content.ContentTypeJSON,
+ ".yaml": content.ContentTypeYAML,
+ ".yml": content.ContentTypeYAML,
+}
+
+// ContentTypeForExtension implements the ContentTyper interface
+// by matching the extension of the path given to ContentTypeForPath
+// against the keys of the map. The extension in the map key
+// must start with a dot, e.g. ".json". The value of the map contains
+// the corresponding content type. There might be many extensions which
+// map to the same content type, e.g. both ".yaml" -> ContentTypeYAML
+// and ".yml" -> ContentTypeYAML.
+type ContentTypeForExtension map[string]content.ContentType
+
+func (m ContentTypeForExtension) ContentTypeForPath(ctx context.Context, _ Filesystem, path string) (content.ContentType, error) {
+ ct, ok := m[filepath.Ext(path)]
+ if !ok {
+ return content.ContentType(""), fmt.Errorf("%w for file %q", ErrCannotDetermineContentType, path)
+ }
+ return ct, nil
+}
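+
+// Usage sketch (illustrative; the path is hypothetical): a typer that only
+// accepts JSON files.
+//
+//   typer := ContentTypeForExtension{".json": content.ContentTypeJSON}
+//   ct, err := typer.ContentTypeForPath(ctx, nil, "cars/foo.json")
+//   // ct == content.ContentTypeJSON; a ".yaml" path would instead yield a
+//   // wrapped ErrCannotDetermineContentType.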
+
+// StaticContentTyper always responds with the same, statically-set, ContentType for any path.
+type StaticContentTyper struct {
+ // ContentType is a required field
+ ContentType content.ContentType
+}
+
+func (t StaticContentTyper) ContentTypeForPath(_ context.Context, _ Filesystem, _ string) (content.ContentType, error) {
+ if len(t.ContentType) == 0 {
+ return "", fmt.Errorf("StaticContentTyper.ContentType must not be empty")
+ }
+ return t.ContentType, nil
+}
+
+// FileExtensionResolver knows how to resolve what file extension to use for
+// a given ContentType.
+type FileExtensionResolver interface {
+ // ExtensionForContentType returns the file extension for the given ContentType.
+ // The returned string MUST start with a dot, e.g. ".json". If the given
+ // ContentType is not known, it is recommended to return a wrapped
+ // ErrUnrecognizedContentType.
+ ExtensionForContentType(ct content.ContentType) (string, error)
+}
+
+// DefaultFileExtensionResolver describes a default mapping between
+// ContentTypes and file extensions, namely JSON -> ".json" and
+// YAML -> ".yaml".
+var DefaultFileExtensionResolver FileExtensionResolver = ExtensionForContentType{
+ content.ContentTypeJSON: ".json",
+ content.ContentTypeYAML: ".yaml",
+}
+
+// ExtensionForContentType is a simple map implementation of FileExtensionResolver.
+type ExtensionForContentType map[content.ContentType]string
+
+func (m ExtensionForContentType) ExtensionForContentType(ct content.ContentType) (string, error) {
+ ext, ok := m[ct]
+ if !ok {
+ return "", fmt.Errorf("%q: %q", ErrUnrecognizedContentType, ct)
+ }
+ return ext, nil
+}
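+
+// Usage sketch (illustrative):
+//
+//   ext, err := DefaultFileExtensionResolver.ExtensionForContentType(content.ContentTypeYAML)
+//   // ext == ".yaml"; an unrecognized ContentType instead yields a wrapped
+//   // ErrUnrecognizedContentType.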
diff --git a/pkg/storage/filesystem/interfaces.go b/pkg/storage/filesystem/interfaces.go
new file mode 100644
index 00000000..d7638099
--- /dev/null
+++ b/pkg/storage/filesystem/interfaces.go
@@ -0,0 +1,49 @@
+package filesystem
+
+import (
+ "context"
+
+ "github.com/weaveworks/libgitops/pkg/storage"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+)
+
+// Storage (in this filesystem package) extends storage.Storage by
+// specializing it to operate in a filesystem context; in other words, it uses
+// a FileFinder to locate the files to operate on.
+type Storage interface {
+ storage.Storage
+
+ // FileFinder returns the underlying FileFinder used.
+ // TODO: Maybe one Storage can have multiple FileFinders?
+ FileFinder() FileFinder
+}
+
+// FileFinder is a generic implementation for locating files on disk, to be
+// used by a Storage.
+//
+// Important: The caller MUST guarantee that the implementation can figure
+// out if the GroupKind is namespaced or not by the following check:
+//
+// namespaced := id.ObjectKey().Namespace != ""
+//
+// In other words, the caller must enforce a namespace being set for namespaced
+// kinds, and namespace not being set for non-namespaced kinds.
+type FileFinder interface {
+ // Filesystem gets the underlying filesystem abstraction, if
+ // applicable.
+ Filesystem() Filesystem
+
+ // ContentTyper gets the underlying ContentTyper used. The ContentTyper
+ // must always return a result, even if the underlying given path doesn't
+ // exist.
+ ContentTyper() ContentTyper
+
+ // ObjectPath gets the file path relative to the root directory.
+ // In order to support a create operation, this function must also return a valid path for
+ // files that do not yet exist on disk.
+ ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error)
+ // ObjectsAt retrieves the ObjectIDs in the file with the given relative file path.
+ ObjectsAt(ctx context.Context, path string) (core.UnversionedObjectIDSet, error)
+ // The FileFinder should be able to list namespaces and Object IDs
+ storage.Lister
+}
diff --git a/pkg/storage/filesystem/path_excluder.go b/pkg/storage/filesystem/path_excluder.go
new file mode 100644
index 00000000..58e8d2a6
--- /dev/null
+++ b/pkg/storage/filesystem/path_excluder.go
@@ -0,0 +1,92 @@
+package filesystem
+
+import (
+ "os"
+ "path/filepath"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// PathExcluder is an interface that lets the user implement custom policies
+// for whether a given relative path to a given directory (fs is scoped at
+// that directory) should be considered for an operation (e.g. inotify watch
+// or file search).
+type PathExcluder interface {
+ // ShouldExcludePath takes the relative path of a file, and returns
+ // whether it should be excluded from the operation.
+ ShouldExcludePath(path string) bool
+}
+
+// DefaultPathExcluders returns a composition of ExcludeDirectoryNames{} for
+// ".git" directories and ExcludeExtensions{} for the ".swp" file extension.
+func DefaultPathExcluders() PathExcluder {
+ return MultiPathExcluder{
+ PathExcluders: []PathExcluder{
+ ExcludeDirectoryNames{
+ DirectoryNamesToExclude: []string{".git"},
+ },
+ ExcludeExtensions{
+ Extensions: []string{".swp"}, // nano creates temporary .swp
+ },
+ },
+ }
+}
+
+// ExcludeDirectoryNames implements PathExcluder.
+var _ PathExcluder = ExcludeDirectoryNames{}
+
+// ExcludeDirectoryNames is a sample implementation of PathExcluder, that excludes
+// files that have any parent directories with the given names.
+type ExcludeDirectoryNames struct {
+ DirectoryNamesToExclude []string
+}
+
+func (e ExcludeDirectoryNames) ShouldExcludePath(path string) bool {
+ parts := strings.Split(filepath.Clean(path), string(os.PathSeparator))
+ return sets.NewString(parts[:len(parts)-1]...).HasAny(e.DirectoryNamesToExclude...)
+}
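+
+// For example (hypothetical paths), with DirectoryNamesToExclude = []string{".git"},
+// "repo/.git/config" is excluded (".git" is a parent directory), while
+// "repo/.git" itself is not (there ".git" is the last path component).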
+
+// ExcludeExtensions implements PathExcluder.
+var _ PathExcluder = ExcludeExtensions{}
+
+// ExcludeExtensions is a sample implementation of PathExcluder, that excludes
+// all files with the given extensions. The strings in the Extensions slice
+// must be in the form of filepath.Ext, i.e. ".json", ".txt", and so forth.
+// The zero value of ExcludeExtensions excludes no files.
+type ExcludeExtensions struct {
+ Extensions []string
+}
+
+func (e ExcludeExtensions) ShouldExcludePath(path string) bool {
+ ext := filepath.Ext(path)
+ for _, exclExt := range e.Extensions {
+ if ext == exclExt {
+ return true
+ }
+ }
+ return false
+}
+
+// MultiPathExcluder implements PathExcluder.
+var _ PathExcluder = &MultiPathExcluder{}
+
+// MultiPathExcluder is a composite PathExcluder that runs all of the
+// PathExcluders in the slice one-by-one, and returns true if any of them
+// returns true. The zero value of MultiPathExcluder excludes no files.
+type MultiPathExcluder struct {
+ PathExcluders []PathExcluder
+}
+
+func (m MultiPathExcluder) ShouldExcludePath(path string) bool {
+ // Loop through all the excluders, and return true if any of them excludes the path
+ for _, excl := range m.PathExcluders {
+ if excl == nil {
+ continue
+ }
+ if excl.ShouldExcludePath(path) {
+ return true
+ }
+ }
+ return false
+}
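+
+// Composition sketch (illustrative): extend the defaults with an extra rule.
+//
+//   excluder := MultiPathExcluder{PathExcluders: []PathExcluder{
+//       DefaultPathExcluders(),
+//       ExcludeExtensions{Extensions: []string{".bak"}},
+//   }}
+//   _ = excluder.ShouldExcludePath(".git/config") // true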
diff --git a/pkg/storage/filesystem/path_excluder_test.go b/pkg/storage/filesystem/path_excluder_test.go
new file mode 100644
index 00000000..5995fd27
--- /dev/null
+++ b/pkg/storage/filesystem/path_excluder_test.go
@@ -0,0 +1,77 @@
+package filesystem
+
+import (
+ "testing"
+)
+
+func TestExcludeDirectoryNames_ShouldExcludePath(t *testing.T) {
+ tests := []struct {
+ name string
+ path string
+ want bool
+ }{
+ {
+ name: "normal",
+ path: ".git/foo",
+ want: true,
+ },
+ {
+ name: "with relative path",
+ path: "./.git/bar/baz",
+ want: true,
+ },
+ {
+ name: "with many parents",
+ path: "/foo/bar/.git/hello",
+ want: true,
+ },
+ {
+ name: "with many children",
+ path: ".git/foo/bar/baz",
+ want: true,
+ },
+ {
+ name: "with parents and children",
+ path: "./foo/bar/.git/baz/bar",
+ want: true,
+ },
+ {
+ name: "empty",
+ path: "",
+ want: false,
+ },
+ {
+ name: "local dir",
+ path: ".",
+ want: false,
+ },
+ {
+ name: "other prefix",
+ path: "foo.git",
+ want: false,
+ },
+ {
+ name: "other suffix",
+ path: ".gitea",
+ want: false,
+ },
+ {
+ name: "absolute path without git",
+ path: "/foo/bar/no/git/here",
+ want: false,
+ },
+ {
+ name: "don't catch files named .git",
+ path: "/hello/.git",
+ want: false,
+ },
+ }
+ e := ExcludeDirectoryNames{DirectoryNamesToExclude: []string{".git"}}
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ if got := e.ShouldExcludePath(tt.path); got != tt.want {
+ t.Errorf("ExcludeGitDirectory.ShouldExcludePath() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
diff --git a/pkg/storage/filesystem/storage.go b/pkg/storage/filesystem/storage.go
new file mode 100644
index 00000000..df9c694b
--- /dev/null
+++ b/pkg/storage/filesystem/storage.go
@@ -0,0 +1,204 @@
+package filesystem
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/storage"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+// NewGeneric creates a new Generic using the given lower-level
+// FileFinder and Namespacer.
+func NewGeneric(fileFinder FileFinder, namespacer storage.Namespacer) (Storage, error) {
+ if fileFinder == nil {
+ return nil, fmt.Errorf("NewGeneric: fileFinder is mandatory")
+ }
+ if namespacer == nil {
+ return nil, fmt.Errorf("NewGeneric: namespacer is mandatory")
+ }
+
+ return &Generic{
+ fileFinder: fileFinder,
+ namespacer: namespacer,
+ }, nil
+}
+
+// Generic is a Storage-compliant implementation that
+// combines the given lower-level FileFinder and Namespacer interfaces (and,
+// through the FileFinder, the Filesystem) in a generic manner.
+type Generic struct {
+ fileFinder FileFinder
+ namespacer storage.Namespacer
+}
+
+func (r *Generic) Namespacer() storage.Namespacer {
+ return r.namespacer
+}
+
+func (r *Generic) FileFinder() FileFinder {
+ return r.fileFinder
+}
+
+func (r *Generic) RefResolver() commit.RefResolver {
+ return r.fileFinder.Filesystem().RefResolver()
+}
+
+func (r *Generic) Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) {
+ // Get the path and verify namespacing info
+ p, err := r.getPath(ctx, id)
+ if err != nil {
+ return nil, err
+ }
+ // Check if the resource indicated by key exists
+ exists, err := r.exists(ctx, p)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, core.NewErrNotFound(id)
+ }
+ // Read the file
+ return r.FileFinder().Filesystem().WithContext(ctx).ReadFile(p)
+}
+
+func (r *Generic) Exists(ctx context.Context, id core.UnversionedObjectID) (bool, error) {
+ // Get the path and verify namespacing info
+ p, err := r.getPath(ctx, id)
+ if err != nil {
+ return false, err
+ }
+ return r.exists(ctx, p)
+}
+
+func (r *Generic) fsFor(ctx context.Context) FS {
+ return r.FileFinder().Filesystem().WithContext(ctx)
+}
+
+func (r *Generic) exists(ctx context.Context, path string) (bool, error) {
+ return Exists(r.fsFor(ctx), path)
+}
+
+func (r *Generic) Checksum(ctx context.Context, id core.UnversionedObjectID) (string, error) {
+ // Get the path and verify namespacing info
+ p, err := r.getPath(ctx, id)
+ if err != nil {
+ return "", err
+ }
+ // Return a "high level" error if the file does not exist
+ checksum, err := r.fsFor(ctx).Checksum(p)
+ if errors.Is(err, os.ErrNotExist) {
+ return "", core.NewErrNotFound(id)
+ }
+ if err != nil {
+ return "", err
+ }
+ return checksum, nil
+}
+
+func (r *Generic) ContentType(ctx context.Context, id core.UnversionedObjectID) (content.ContentType, error) {
+ // Get the path and verify namespacing info
+ p, err := r.getPath(ctx, id)
+ if err != nil {
+ return "", err
+ }
+ // The object doesn't necessarily need to exist
+ return r.FileFinder().ContentTyper().ContentTypeForPath(ctx, r.fileFinder.Filesystem(), p)
+}
+
+func (r *Generic) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error {
+ // Get the path and verify namespacing info
+ p, err := r.getPath(ctx, id)
+ if err != nil {
+ return err
+ }
+
+ // Create the underlying directories if they do not exist already
+ exists, err := r.exists(ctx, p)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ if err := r.fsFor(ctx).MkdirAll(filepath.Dir(p), 0755); err != nil {
+ return err
+ }
+ }
+ // Write the file content
+ return r.fsFor(ctx).WriteFile(p, content, 0664)
+}
+
+func (r *Generic) Delete(ctx context.Context, id core.UnversionedObjectID) error {
+ // Get the path and verify namespacing info
+ p, err := r.getPath(ctx, id)
+ if err != nil {
+ return err
+ }
+
+ // Check if the resource indicated by key exists
+ exists, err := r.exists(ctx, p)
+ if err != nil {
+ return err
+ }
+ if !exists {
+ return core.NewErrNotFound(id)
+ }
+ // Remove the file
+ return r.fsFor(ctx).Remove(p)
+}
+
+// ListGroupKinds returns all GroupKinds known by the implementation at this
+// point in time. The set might vary over time as data is created and deleted;
+// it should not be treated as a universal "what types could possibly exist",
+// but rather as "what are the GroupKinds of the objects that currently
+// exist". Specific implementations might, however, honor this guideline
+// differently. This might be used for introspection into the system.
+func (r *Generic) ListGroupKinds(ctx context.Context) ([]core.GroupKind, error) {
+ // Just use the underlying filefinder
+ return r.FileFinder().ListGroupKinds(ctx)
+}
+
+// ListNamespaces lists the available namespaces for the given GroupKind.
+// This function shall only be called for namespaced objects; it is up to
+// the caller to make sure they do not call this method for root-spaced
+// objects; for that the behavior is undefined (but returning an error
+// is recommended).
+func (r *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) {
+ namespaced, err := r.namespacer.IsNamespaced(gk)
+ if err != nil {
+ return nil, err
+ }
+ // Validate the groupkind
+ if !namespaced {
+ return nil, fmt.Errorf("%w: cannot list namespaces for non-namespaced kind: %v", storage.ErrNamespacedMismatch, gk)
+ }
+ // Just use the underlying filefinder
+ return r.FileFinder().ListNamespaces(ctx, gk)
+}
+
+// ListObjectIDs returns a list of unversioned ObjectIDs.
+// For namespaced GroupKinds, the caller must provide a namespace, and for
+// root-spaced GroupKinds, the caller must not. When namespaced, this function
+// must only return object IDs for that given namespace.
+func (r *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) {
+ // Validate the namespace parameter
+ if err := storage.VerifyNamespaced(r.Namespacer(), gk, namespace); err != nil {
+ return nil, err
+ }
+ // Just use the underlying filefinder
+ return r.FileFinder().ListObjectIDs(ctx, gk, namespace)
+}
+
+func (r *Generic) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) {
+ // Verify namespacing info
+ if err := storage.VerifyNamespaced(r.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace); err != nil {
+ return "", err
+ }
+ // Get the path
+ return r.FileFinder().ObjectPath(ctx, id)
+}
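+
+// Construction sketch (illustrative; fileFinder, namespacer and id are
+// assumed to be provided by the caller):
+//
+//   s, err := NewGeneric(fileFinder, namespacer)
+//   if err != nil {
+//       return err
+//   }
+//   data, err := s.Read(ctx, id)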
diff --git a/pkg/storage/filesystem/unstructured/btree/btree_cache_test.go b/pkg/storage/filesystem/unstructured/btree/btree_cache_test.go
new file mode 100644
index 00000000..dc9dc841
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/btree/btree_cache_test.go
@@ -0,0 +1,84 @@
+package btree
+
+/*
+
+func Test_strItem_Less_key(t *testing.T) {
+ tests := []struct {
+ str string
+ cmp btree.Item
+ want bool
+ }{
+ {"", &key{objectID: objectID{core.GroupKind{Group: "foo", Kind: "bar"}, core.ObjectKey{Name: "bar"}}}, true},
+ }
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ if got := strItem(tt.str).Less(tt.cmp); got != tt.want {
+ t.Errorf("strItem.Less() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_key_String(t *testing.T) {
+ tests := []struct {
+ objectID objectID
+ want string
+ }{
+ {objID("foo.com", "Bar", "baz", ""), "key:f6377908"},
+ }
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ k := &key{objectID: tt.objectID}
+ if got := k.String(); got != tt.want {
+ t.Errorf("key.String() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func objID(group, kind, ns, name string) objectID {
+ return objectID{Kind: core.GroupKind{Group: group, Kind: kind}, Key: core.ObjectKey{Name: name, Namespace: ns}}
+}
+
+type items []ItemQuery
+
+// find returns the index where the given item should be inserted into this
+// list. 'found' is true if the item already exists in the list at the given
+// index.
+func (s items) find(item ItemQuery) (index int, found bool) {
+ i := sort.Search(len(s), func(i int) bool {
+ return item.Less(s[i])
+ })
+ if i > 0 && !s[i-1].Less(item) {
+ return i - 1, true
+ }
+ return i, false
+}
+
+func Test_items_find(t *testing.T) {
+ tests := []struct {
+ list []ItemQuery
+ item ItemQuery
+ wantIndex int
+ wantFound bool
+ }{
+ {
+ list: []ItemQuery{strItem("cc:bb"), strItem("foo:aa:kk"), strItem("foo:bb:kk"), strItem("foo:cc:kk"), strItem("foo:cc")},
+ item: strItem("foo:"),
+ wantIndex: 1,
+ wantFound: false,
+ },
+ }
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ gotIndex, gotFound := items(tt.list).find(tt.item)
+ if gotIndex != tt.wantIndex {
+ t.Errorf("items.find() gotIndex = %v, want %v", gotIndex, tt.wantIndex)
+ }
+ if gotFound != tt.wantFound {
+ t.Errorf("items.find() gotFound = %v, want %v", gotFound, tt.wantFound)
+ }
+ })
+ }
+}
+*/
diff --git a/pkg/storage/filesystem/unstructured/btree/btree_index.go b/pkg/storage/filesystem/unstructured/btree/btree_index.go
new file mode 100644
index 00000000..15e5b54f
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/btree/btree_index.go
@@ -0,0 +1,333 @@
+package btree
+
+import (
+ "strings"
+
+ "github.com/google/btree"
+)
+
+// AbstractItem is the abstract btree.Item, the ultimate base type for the B-Tree's ordering.
+type AbstractItem = btree.Item
+
+// ItemString extends the abstract btree.Item with the "opinion" that all Items in this
+// B-Tree have a string representation that is operated on as the B-Tree key.
+// It should obey the following logic in ItemString.Less(than):
+// - If than is another ItemString, just perform a "return me < than"
+// - If than is an ItemQuery, let the ItemQuery decide the ordering by calling than.QueryGTE(me)
+type ItemString interface {
+ AbstractItem
+ // String returns the string representation of the given item, this serves as the B-Tree key
+ String() string
+}
+
+// ItemQuery represents a query for the Index, where the user doesn't know the exact string
+// representation of the item that is being searched for. The ItemQuery.Less function should
+// behave just like a "return me < than". However, when comparing an ItemString and an ItemQuery,
+// the ItemQuery can fully decide the ordering, because the ItemString delegates the decision to
+// the ItemQuery's QueryGTE function. This allows for flexible searching for items in the tree.
+//
+// When an ordering has been settled, e.g. ItemString1 < ItemQuery <= ItemString2 < ItemString3, and
+// Index.Find() is called, ItemString2 will be returned (i.e. the "next item to the right").
+// When Index.List() is called, the iterator will be called for every item it (ascending "to the
+// right") for which ItemQuery.Matches(it) is true.
+//
+// ItemQueries are never persisted in the tree, they are only used for traversing the tree.
+type ItemQuery interface {
+ AbstractItem
+
+ // ItemQuery.QueryGTE(ItemString) is the same as (actually called from) ItemString.Less(ItemQuery).
+ QueryGTE(it ItemString) bool
+ // Matches returns true if the query matches the given item. It is used when iterating, after an
+ // ordering has been finalized.
+ Matches(it ItemString) bool
+}
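+
+// Ordering sketch (illustrative): with stored items "a:1" < "a:2" < "b:1",
+// PrefixQuery("a:") settles as q <= "a:1", so Find(q) returns "a:1", and
+// List(q, ...) visits {"a:1", "a:2"} before Matches fails on "b:1".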
+
+// Item is the base type that is stored in the B-Tree. There are two main types of Items, ValueItems
+// and indexed pointers to ValueItems. Hence, any Item points to a ValueItem in one way or another.
+type Item interface {
+ ItemString
+ // GetValueItem returns the ValueItem this Item points to (in the case of an index), or itself
+ // (in the case of Item already being a ValueItem).
+ GetValueItem() ValueItem
+}
+
+// ItemIterator represents a callback function when iterating through a set of items in the tree.
+// As long as true is returned, iteration continues.
+type ItemIterator func(it Item) bool
+
+// ValueItem represents a mapped key to a value that is stored in the B-Tree.
+type ValueItem interface {
+ Item
+
+ // Key returns the key of this mapping
+ Key() interface{}
+ // Value returns the value of this mapping
+ Value() interface{}
+ // IndexedPtrs returns all indexed items that are pointing to this ValueItem
+ IndexedPtrs() []Item
+}
+
+// Index represents one B-Tree that contains key-value mappings indexed by their key and possibly
+// other fields.
+type Index interface {
+ // Get returns an Item in the tree that matches exactly it (i.e. !it.Less(it2) && !it2.Less(it))
+ // Both an ItemString (or higher) or an ItemQuery can be passed to this function.
+ Get(it AbstractItem) (Item, bool)
+ // Put inserts or overwrites the given ValueItem (including related indexes) in the underlying tree.
+ Put(it ValueItem)
+ // Delete deletes the ValueItem (and the related indexes) that is equal to it. True is returned if
+ // such an item actually existed in the tree and was deleted.
+ Delete(it AbstractItem) bool
+
+ // Find returns the next item in ascending order, when the place for the ItemQuery q has been found as:
+ // Item1 < q <= Item2 < Item3
+ // In this example, (Item2, true) would be returned, as long as q.Matches(Item2) == true. Otherwise, or
+ // if q is the maximum of the tree, (nil, false) is returned.
+ // See PrefixQuery and prefixPivotQuery for examples.
+ Find(q ItemQuery) (Item, bool)
+ // List returns the next items in ascending order, when the place for the ItemQuery q has been found as:
+ // Item1 < q <= Item2 < Item3 < Item4
+ // In this example, it in [Item2, Item4] would be iterated, as long as q.Matches(it) == true. When false
+ // is returned from a match, iteration is stopped.
+ // See PrefixQuery and prefixPivotQuery for examples.
+ List(q ItemQuery, iterator ItemIterator)
+
+ // Clear clears the B-Tree completely, but re-uses some nodes for better resource utilization.
+ // It does not disturb other trees that share the same Copy-on-Write base.
+ Clear()
+
+ // Internal returns the underlying B-Tree.
+ Internal() *btree.BTree
+}
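+
+// Usage sketch (illustrative; newIndex is the package-internal constructor):
+//
+//   idx := newIndex(nil)
+//   idx.Put(NewStringStringItem("id", "Car.foo.com:default:foo", "cars/foo.yaml"))
+//   if it, ok := idx.Find(PrefixQuery("id:Car.foo.com")); ok {
+//       _ = it.GetValueItem().Value() // "cars/foo.yaml"
+//   }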
+
+type bTreeIndexImpl struct {
+ btree *btree.BTree
+ parentRef string
+}
+
+// Get returns an Item in the tree that matches exactly it (i.e. !it.Less(it2) && !it2.Less(it))
+// Both an ItemString (or higher) or an ItemQuery can be passed to this function.
+func (i *bTreeIndexImpl) Get(it btree.Item) (Item, bool) {
+ found := i.btree.Get(it)
+ if found != nil {
+ return found.(Item), true
+ }
+ return nil, false
+}
+
+// Put inserts or overwrites the given ValueItem (including related indexes) in the underlying tree.
+func (i *bTreeIndexImpl) Put(it ValueItem) {
+ // First, delete any previous, now stale, data related to this item
+ i.deleteIndexes(it)
+ // Add the item to the tree
+ i.btree.ReplaceOrInsert(it)
+ // Register all indexes of it, too
+ for _, idxPtr := range it.IndexedPtrs() {
+ i.btree.ReplaceOrInsert(idxPtr)
+ }
+}
+
+// Delete deletes the ValueItem (and the related indexes) that is equal to it. True is returned if
+// such an item actually existed in the tree and was deleted.
+func (i *bTreeIndexImpl) Delete(it btree.Item) bool {
+ // deleteIndexes returns true if it exists (=> needs to be deleted)
+ if !i.deleteIndexes(it) {
+ return false // nothing to delete
+ }
+
+ // Delete the item itself from the tree
+ i.btree.Delete(it)
+ return true
+}
+
+// deleteIndexes deletes the indexes associated with it
+// true is returned if the deletions were made, false
+// if the item did not exist
+func (i *bTreeIndexImpl) deleteIndexes(it btree.Item) bool {
+ // Deliberately Get the item first, to resolve the ValueItem it points to
+ found, ok := i.Get(it)
+ if !ok {
+ return false // nothing to delete, not found
+ }
+
+ // Delete all indexes of it
+ for _, idxPtr := range found.GetValueItem().IndexedPtrs() {
+ i.btree.Delete(idxPtr)
+ }
+ return true
+}
+
+// Find returns the next item in ascending order, when the place for the ItemQuery q has been found as:
+// Item1 < q <= Item2 < Item3
+// In this example, (Item2, true) would be returned, as long as q.Matches(Item2) == true. Otherwise, or
+// if q is the maximum of the tree, (nil, false) is returned.
+func (i *bTreeIndexImpl) Find(q ItemQuery) (retit Item, found bool) {
+ i.list(q, func(it Item) bool {
+ retit = it
+ found = true
+ return false // only find one item
+ })
+ return // retit, found
+}
+
+// List returns the next items in ascending order, when the place for the ItemQuery q has been found as:
+// Item1 < q <= Item2 < Item3 < Item4
+// In this example, it in [Item2, Item4] would be iterated, as long as q.Matches(it) == true. When false
+// is returned from a match, iteration is stopped.
+func (i *bTreeIndexImpl) List(q ItemQuery, iterator ItemIterator) {
+ i.list(q, iterator)
+}
+
+func (i *bTreeIndexImpl) list(q ItemQuery, iterator ItemIterator) {
+ var ii Item // cache ii between iteration callbacks
+ i.btree.AscendGreaterOrEqual(q, func(i btree.Item) bool {
+ ii = i.(Item)
+ if !q.Matches(ii) { // make sure ii matches the query
+ return false
+ }
+ return iterator(ii)
+ })
+}
+
+func (i *bTreeIndexImpl) Internal() *btree.BTree { return i.btree }
+func (i *bTreeIndexImpl) Clear() { i.btree.Clear(true) }
+
+// NewItemString returns a new ItemString for the given B-Tree key.
+// Custom ValueItems should embed this ItemString to automatically get
+// the expected sorting functionality.
+func NewItemString(key string) ItemString {
+ return &itemString{key}
+}
+
+// itemString implements ItemString
+var _ ItemString = &itemString{}
+
+type itemString struct{ key string }
+
+// Less implements the sorting functionality described in the ItemString godoc.
+// If this Item is compared to an ItemQuery, the ItemQuery should decide the ordering.
+// If this Item is compared to a fellow ItemString, just use simple string comparison.
+func (s *itemString) Less(item btree.Item) bool {
+ switch it := item.(type) {
+ case ItemQuery:
+ return it.QueryGTE(s)
+ case ItemString:
+ return s.key < it.String()
+ default:
+ panic("items must implement either ItemQuery or ItemString")
+ }
+}
+func (s *itemString) String() string { return s.key }
+
+// NewIndexedPtr returns a new Item that, for the given key, points to
+// the given ValueItem. This means fields of ValueItems can be indexed
+// under the given key, and added to the B-Tree. ptr must be non-nil,
+// otherwise this function will panic. The key of the pointed-to item
+// will be appended to the sort key as well.
+func NewIndexedPtr(key string, ptr *ValueItem) Item {
+ if ptr == nil {
+ panic("NewIndexedPtr: ptr must not be nil")
+ }
+ return &indexedPtr{NewItemString(key + ":" + (*ptr).String()), ptr}
+}
+
+// indexedPtr implements Item.
+var _ Item = &indexedPtr{}
+
+// indexedPtr extends the ItemString with the given pointer to the ValueItem.
+type indexedPtr struct {
+ ItemString
+ ptr *ValueItem
+}
+
+func (s *indexedPtr) GetValueItem() ValueItem { return *s.ptr }
+
+// PrefixQuery implements ItemQuery
+var _ ItemQuery = PrefixQuery("")
+
+// PrefixQuery is an ItemQuery that matches all items with the given prefix. For a Find() the smallest
+// item with the given prefix is returned. For a List() the items containing the prefix will be iterated
+// in ascending order (from smallest to largest).
+// Example: bar:xx < foo:aa:aa < foo:aa:bb < foo:bb:aa < xx:yy:zz
+// Find("foo:aa") => "foo:aa:aa"
+// List("foo:aa") => {"foo:aa:aa", "foo:aa:bb"}
+// Find("foo:bb") => "foo:bb:aa"
+// List("foo:bb") => {"foo:bb:aa"}
+type PrefixQuery string
+
+func (s PrefixQuery) Less(item btree.Item) bool { return string(s) < item.(ItemString).String() }
+func (s PrefixQuery) QueryGTE(it ItemString) bool { return it.String() < string(s) }
+
+func (s PrefixQuery) Matches(it ItemString) bool {
+ return strings.HasPrefix(it.String(), string(s))
+}
+
+// NewPrefixPivotQuery returns an ItemQuery that matches all items with the given Prefix, but starting
+// the search for items that don't start with "Prefix+Pivot". A Find() returns the smallest item that does
+// not have the "Prefix+Pivot" prefix, but still contains "Prefix". A List() starts iterating the tree
+// in ascending order (from smallest to largest) from the item returned by Find(). Behavior is undefined
+// if Prefix or Pivot (or both) is an empty string.
+//
+// Example: bar:xx < foo:aa:aa < foo:aa:bb < foo:bb:aa < foo:bb:cc < foo:cc:zz < xx:yy:zz
+// Find(Prefix: "foo:", Pivot: "aa") => "foo:bb:aa"
+// List(Prefix: "foo:", Pivot: "aa") => {"foo:bb:aa", "foo:bb:cc", "foo:cc:zz"}
+// Find(Prefix: "foo:", Pivot: "bb") => "foo:cc:zz"
+// List(Prefix: "foo:", Pivot: "bb") => {"foo:cc:zz"}
+func NewPrefixPivotQuery(prefix, pivot string) ItemQuery {
+ return &prefixPivotQuery{Prefix: prefix, Pivot: pivot}
+}
+
+// prefixPivotQuery implements ItemQuery
+var _ ItemQuery = &prefixPivotQuery{}
+
+type prefixPivotQuery struct {
+ Prefix string
+ Pivot string
+}
+
+func (s *prefixPivotQuery) key() string { return s.Prefix + s.Pivot }
+func (s *prefixPivotQuery) Less(item btree.Item) bool {
+ itStr := item.(ItemString).String()
+ return s.key() < itStr && !strings.HasPrefix(itStr, s.key())
+}
+
+func (s *prefixPivotQuery) QueryGTE(it ItemString) bool {
+ return it.String() < s.key() || strings.HasPrefix(it.String(), s.key())
+}
+func (s *prefixPivotQuery) Matches(it ItemString) bool {
+ return strings.HasPrefix(it.String(), s.Prefix)
+}
+
+// NewStringStringItem returns a new mapping (between a string-encoded key and value), that
+// can be stored in the B-Tree. Keys stored under the same "bucket" prefix together essentially
+// form a "virtual" map[string]string within the B-Tree, but with e.g. copy-on-write support.
+// Any extra indexed fields registered will point to this ValueItem.
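+// For example (hypothetical values), NewStringStringItem("id",
+// "Car.foo.com:default:foo", "cars/foo.yaml", "path:cars/foo.yaml") stores the
+// mapping under "id:Car.foo.com:default:foo", and additionally registers an
+// indexed pointer sorted under "path:cars/foo.yaml:id:Car.foo.com:default:foo".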
+func NewStringStringItem(bucket, key, value string, indexedFields ...string) ValueItem {
+ str := key
+ if len(bucket) != 0 {
+ str = bucket + ":" + key
+ }
+ kvItem := &kvValueItem{
+ ItemString: NewItemString(str),
+ key: key,
+ value: value,
+ indexes: make([]Item, 0, len(indexedFields)),
+ }
+ var valit ValueItem = kvItem
+ for _, indexedField := range indexedFields {
+ kvItem.indexes = append(kvItem.indexes, NewIndexedPtr(indexedField, &valit))
+ }
+ return kvItem
+}
+
+type kvValueItem struct {
+ ItemString
+ key, value string
+ indexes []Item
+}
+
+func (i *kvValueItem) GetValueItem() ValueItem { return i } // this is already a ValueItem
+func (i *kvValueItem) Key() interface{} { return i.key } // just return the plain key
+func (i *kvValueItem) Value() interface{} { return i.value } // just return the plain value
+func (i *kvValueItem) IndexedPtrs() []Item { return i.indexes } // indexes from constructor
diff --git a/pkg/storage/filesystem/unstructured/btree/btree_index_test.go b/pkg/storage/filesystem/unstructured/btree/btree_index_test.go
new file mode 100644
index 00000000..f2a356f7
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/btree/btree_index_test.go
@@ -0,0 +1,277 @@
+package btree
+
+import (
+ "reflect"
+ "strconv"
+ "testing"
+)
+
+func Test_ItemString_Less_ItemString_QueryPrefix(t *testing.T) {
+ tests := []struct {
+ str string
+ than string
+ want bool
+ }{
+ {"", "", false},
+ {"", "foo", true},
+ {"foo", "", false},
+ {"a", "b", true},
+ {"a:a", "a:b", true},
+ {"a:c", "a:b", false},
+ {"b:a", "a:b", false},
+ {"id:Bar.foo.com", "path:sample-file.yaml", true},
+ {"id:Bar.foo.com", "checksum:123", false},
+ {"path:sample-file.yaml:key:Baz.foo.com:default:foo:sample-file.yaml", "path:sample-file.yaml:key:Bar.foo.com:custom:foo:sample-file.yaml", false},
+ }
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ if got := NewItemString(tt.str).Less(NewItemString(tt.than)); got != tt.want {
+ t.Errorf("NewItemString.Less(NewItemString) = %v, want %v", got, tt.want)
+ }
+ if got := NewItemString(tt.str).Less(PrefixQuery(tt.than)); got != tt.want {
+ t.Errorf("NewItemString.Less(PrefixQuery) = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_ItemString_Less_QueryPrefixPivot(t *testing.T) {
+ tests := []struct {
+ str string
+ prefix, pivot string
+ want bool
+ }{
+ {"", "foo", "", true},
+ {"a", "b", "", true},
+ {"a:a", "a:b", "", true},
+ {"a:c", "a:b", "", false},
+ {"b:a", "a:b", "", false},
+ {"id:Bar.foo.com", "path:sample-file.yaml", "", true},
+ {"id:Bar.foo.com", "checksum:123", "", false},
+ {"path:sample-file.yaml:key:Baz.foo.com:default:foo:sample-file.yaml", "path:sample-file.yaml:key:Bar.foo.com:custom:foo:sample-file.yaml", "", false},
+ // bar:xx < foo:aa:aa < foo:aa:bb < foo:bb:aa < foo:bb:cc < foo:cc:zz < xx:yy:zz
+ {"bar:xx", "foo:aa", "aa", true},
+ {"foo:", "foo:aa", "aa", true},
+ {"foo:aa:aa", "foo:", "aa", true},
+ {"foo:aa:bb", "foo:", "aa", true},
+ {"foo:bb:aa", "foo:", "aa", false},
+ {"foo:cc:aa", "foo:", "aa", false},
+ {"foo:bb:bb", "foo:", "bb", true},
+ {"foo:cc:aa", "foo:", "bb", false},
+ }
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ if got := NewItemString(tt.str).Less(NewPrefixPivotQuery(tt.prefix, tt.pivot)); got != tt.want {
+ t.Errorf("ItemString.Less() = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_bTreeIndexImpl_Find(t *testing.T) {
+ exampleItems := []string{"bar:xx", "foo:aa:aa", "foo:aa:bb", "foo:bb:aa", "foo:bb:cc", "foo:cc:zz", "xx:yy:zz"}
+ tests := []struct {
+ items []string
+ q ItemQuery
+ wantItem string
+ wantFound bool
+ }{
+ // Test cases for PrefixQuery:
+ {
+ items: exampleItems,
+ q: PrefixQuery(""),
+ wantItem: "bar:xx",
+ wantFound: true,
+ },
+ {
+ // Find("foo:aa") => "foo:aa:aa"
+ items: exampleItems,
+ q: PrefixQuery("foo:aa"),
+ wantItem: "foo:aa:aa",
+ wantFound: true,
+ },
+ {
+ // Find("foo:bb") => "foo:bb:aa"
+ items: exampleItems,
+ q: PrefixQuery("foo:bb"),
+ wantItem: "foo:bb:aa",
+ wantFound: true,
+ },
+ // Test cases for PrefixPivotQuery:
+ {
+ // Find(Prefix: "foo:", Pivot: "aa") => "foo:bb:aa"
+ items: exampleItems,
+ q: NewPrefixPivotQuery("foo:", "aa"),
+ wantItem: "foo:bb:aa",
+ wantFound: true,
+ },
+ {
+ // Find(Prefix: "foo:", Pivot: "bb") => "foo:cc:zz"
+ items: exampleItems,
+ q: NewPrefixPivotQuery("foo:", "bb"),
+ wantItem: "foo:cc:zz",
+ wantFound: true,
+ },
+ }
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ i := newIndex(nil)
+ for _, item := range tt.items {
+ i.Put(NewStringStringItem("", item, ""))
+ }
+ gotItem, gotFound := i.Find(tt.q)
+ if gotItem.String() != tt.wantItem {
+ t.Errorf("bTreeIndexImpl.Find() gotRetit = %v, want %v", gotItem.String(), tt.wantItem)
+ }
+ if gotFound != tt.wantFound {
+ t.Errorf("bTreeIndexImpl.Find() gotFound = %v, want %v", gotFound, tt.wantFound)
+ }
+ })
+ }
+}
+
+func Test_Queries_List(t *testing.T) {
+ exampleItems := []string{"bar:xx", "foo:aa:aa", "foo:aa:bb", "foo:bb:aa", "foo:bb:cc", "foo:cc:zz", "xx:yy:zz"}
+ tests := []struct {
+ items []string
+ q ItemQuery
+ want []string
+ }{
+ // Test cases for PrefixQuery:
+ {
+ items: exampleItems,
+ q: PrefixQuery(""),
+ want: exampleItems,
+ },
+ {
+ // List("foo:aa") => {"foo:aa:aa", "foo:aa:bb"}
+ items: exampleItems,
+ q: PrefixQuery("foo:aa"),
+ want: exampleItems[1:3],
+ },
+ {
+ // List("foo:bb") => {"foo:bb:aa", "foo:bb:cc"}
+ items: exampleItems,
+ q: PrefixQuery("foo:bb"),
+ want: exampleItems[3:5],
+ },
+ // Test cases for PrefixPivotQuery:
+ {
+ // List(Prefix: "foo:", Pivot: "aa") => {"foo:bb:aa", "foo:bb:cc", "foo:cc:zz"}
+ items: exampleItems,
+ q: NewPrefixPivotQuery("foo:", "aa"),
+ want: exampleItems[3:6],
+ },
+ {
+ // List(Prefix: "foo:", Pivot: "bb") => {"foo:cc:zz"}
+ items: exampleItems,
+ q: NewPrefixPivotQuery("foo:", "bb"),
+ want: exampleItems[5:6],
+ },
+ }
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ i := newIndex(nil)
+ for _, item := range tt.items {
+ i.Put(NewStringStringItem("", item, ""))
+ }
+ got := make([]string, 0, len(tt.want))
+ i.List(tt.q, func(it Item) bool {
+ got = append(got, it.String())
+ return true
+ })
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("bTreeIndexImpl.List() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func Test_index_List(t *testing.T) {
+ var (
+ key1 = NewStringStringItem("id", "Bar.foo.com:default:foo", "sample-file.yaml", "path:sample-file.yaml")
+ key2 = NewStringStringItem("id", "Bar.foo.com:default:other-foo", "other-file.yaml", "path:other-file.yaml")
+ key3 = NewStringStringItem("id", "Bar.foo.com:custom:foo", "sample-file.yaml", "path:sample-file.yaml")
+ key4 = NewStringStringItem("id", "Baz.foo.com:default:foo", "sample-file.yaml", "path:sample-file.yaml")
+ )
+ sampleInit := func(i Index) {
+ i.Put(key1)
+ i.Put(key2)
+ i.Put(key3)
+ i.Put(key4)
+ }
+ sampleCleanup := func(i Index) {
+ i.Delete(key1)
+ i.Delete(key2)
+ i.Delete(key3)
+ i.Delete(key4)
+ }
+ tests := []struct {
+ initFunc func(i Index)
+ cleanupFunc func(i Index)
+ prefix string
+ want []ValueItem
+ }{
+ {
+ initFunc: sampleInit,
+ cleanupFunc: sampleCleanup,
+ prefix: "path",
+ want: []ValueItem{key2, key3, key1, key4}, // sorted in order of the index, i.e. the files, and THEN the actual values
+ },
+ {
+ initFunc: sampleInit,
+ cleanupFunc: sampleCleanup,
+ prefix: "path:sample-file.yaml",
+ want: []ValueItem{key3, key1, key4},
+ },
+ {
+ initFunc: sampleInit,
+ cleanupFunc: sampleCleanup,
+ prefix: "id:Bar.foo.com",
+ want: []ValueItem{key3, key1, key2},
+ },
+ {
+ initFunc: sampleInit,
+ cleanupFunc: sampleCleanup,
+ prefix: "id:Baz.foo.com",
+ want: []ValueItem{key4},
+ },
+ {
+ initFunc: sampleInit,
+ cleanupFunc: sampleCleanup,
+ prefix: "id:Bar.foo.com:default",
+ want: []ValueItem{key1, key2},
+ },
+ {
+ initFunc: sampleInit,
+ cleanupFunc: sampleCleanup,
+ prefix: "id:Bar.foo.com:default:foo",
+ want: []ValueItem{key1},
+ },
+ }
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ btreeIndex := newIndex(nil)
+ tt.initFunc(btreeIndex)
+ wantStr := make([]string, 0, len(tt.want))
+ for _, it := range tt.want {
+ wantStr = append(wantStr, it.String())
+ }
+
+ got := []string{}
+ btreeIndex.List(PrefixQuery(tt.prefix), func(it Item) bool {
+ got = append(got, it.GetValueItem().String())
+ return true
+ })
+ if !reflect.DeepEqual(got, wantStr) {
+ t.Errorf("got = %v, want %v", got, wantStr)
+ }
+ tt.cleanupFunc(btreeIndex)
+ if l := btreeIndex.Internal().Len(); l != 0 {
+ t.Errorf("expected clean tree, got len = %d", l)
+ }
+ })
+ }
+}
diff --git a/pkg/storage/filesystem/unstructured/btree/btree_versioned_index.go b/pkg/storage/filesystem/unstructured/btree/btree_versioned_index.go
new file mode 100644
index 00000000..13eca115
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/btree/btree_versioned_index.go
@@ -0,0 +1,86 @@
+package btree
+
+import (
+ "errors"
+ "fmt"
+
+ "github.com/google/btree"
+)
+
+var (
+ ErrVersionRefNotFound = errors.New("version ref tree not found")
+ ErrVersionRefAlreadyExists = errors.New("version ref tree already exists")
+)
+
+/*
+ New Commit Event:
+ -> UnstructuredStorage.Sync(ctx), where ctx has
+
+*/
+
+// VersionedIndex represents a set of Indexes that are built as copy-on-write
+// extensions on top of each other.
+type VersionedIndex interface {
+ VersionedTree(ref string) (Index, bool)
+ NewVersionedTree(ref, base string) (Index, error)
+ DeleteVersionedTree(ref string)
+}
+
+func NewVersionedIndex() VersionedIndex {
+ return &bTreeVersionedIndexImpl{
+ indexes: make(map[string]Index),
+ freelist: btree.NewFreeList(btree.DefaultFreeListSize),
+ }
+}
+
+type bTreeVersionedIndexImpl struct {
+ indexes map[string]Index
+ freelist *btree.FreeList
+}
+
+func (i *bTreeVersionedIndexImpl) VersionedTree(ref string) (Index, bool) {
+ t, ok := i.indexes[ref]
+ return t, ok
+}
+
+func (i *bTreeVersionedIndexImpl) NewVersionedTree(ref, base string) (Index, error) {
+ // Make sure the ref doesn't already exist
+ _, ok := i.VersionedTree(ref)
+ if ok {
+ return nil, fmt.Errorf("%w: %s", ErrVersionRefAlreadyExists, ref)
+ }
+
+ var t2 Index
+ if len(base) != 0 {
+ // Get the base versionref
+ t, ok := i.VersionedTree(base)
+ if !ok {
+ return nil, fmt.Errorf("%w: %s", ErrVersionRefNotFound, base)
+ }
+ // Clone the base BTree
+ t2 = &bTreeIndexImpl{btree: t.Internal().Clone(), parentRef: base}
+ } else {
+ // Create a new BTree with the shared freelist
+ t2 = newIndex(i.freelist)
+ }
+ // Register in the map
+ i.indexes[ref] = t2
+ return t2, nil
+}
+
+func (i *bTreeVersionedIndexImpl) DeleteVersionedTree(ref string) {
+ t, ok := i.VersionedTree(ref)
+ if ok {
+ // Move the nodes of the cow-part of the given BTree to the freelist for re-use
+ t.Internal().Clear(true)
+ }
+ // Just delete the index
+ delete(i.indexes, ref)
+}
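+
+// Usage sketch (illustrative ref names):
+//
+//   vi := NewVersionedIndex()
+//   main, _ := vi.NewVersionedTree("main", "")   // fresh tree
+//   pr, _ := vi.NewVersionedTree("pr-1", "main") // copy-on-write clone of "main"
+//   _, _ = main, pr
+//   vi.DeleteVersionedTree("pr-1")               // recycle the clone's nodes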
+
+func newIndex(freelist *btree.FreeList) Index {
+ if freelist == nil {
+ return &bTreeIndexImpl{btree: btree.New(32)}
+ }
+ return &bTreeIndexImpl{btree: btree.NewWithFreeList(32, freelist)}
+}
diff --git a/pkg/storage/filesystem/unstructured/btree/utils.go b/pkg/storage/filesystem/unstructured/btree/utils.go
new file mode 100644
index 00000000..7a7856dc
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/btree/utils.go
@@ -0,0 +1,56 @@
+package btree
+
+import "fmt"
+
+// GetValueString searches the Index for an element that is equal to the
+// search parameter. The function tries to cast the ValueItem's Value
+// to either a string or fmt.Stringer, whose value is then returned. If
+// this is unsuccessful, or the item doesn't exist, an empty string is returned.
+// The boolean return value reports whether the item was found in the index.
+func GetValueString(index Index, search AbstractItem) (string, bool) {
+ it, ok := index.Get(search)
+ if !ok {
+ return "", false
+ }
+ valItem := it.GetValueItem()
+ if valItem == nil {
+ return "", true
+ }
+ switch s := valItem.Value().(type) {
+ case string:
+ return s, true
+ case fmt.Stringer:
+ return s.String(), true
+ }
+ return "", true
+}
+
+// UniqueIterFunc is used in ListUnique.
+type UniqueIterFunc func(it ValueItem) string
+
+// ListUnique traverses the index in ascending order for each item under prefix.
+// However, when an item is matched, the UniqueIterFunc return value decides where to
+// start the search the next time. One possible implementation is to return the
+// common part of the name you don't want to see again (e.g. "aa:" in the example below),
+// which will make ListUnique skip all other "duplicate" "foo:aa:*" items.
+//
+// Example:
+// index = {"bar:aa", "foo:aa:bb", "foo:aa:cc", "foo:aa:cc:dd", "foo:bb:cc", "foo:bb:dd", "foo:dd:ee"}
+// prefix = "foo:"
+// iterator returns strings.Split(it.String(), ":")[1], e.g. "foo:aa:cc:dd" => "aa"
+// Then the following items will be visited: {"foo:aa:bb", "foo:bb:cc", "foo:dd:ee"}
+func ListUnique(index Index, prefix string, iterator UniqueIterFunc) {
+ it, found := index.Find(PrefixQuery(prefix))
+ if !found {
+ return
+ }
+ q := NewPrefixPivotQuery(prefix, iterator(it.GetValueItem())).(*prefixPivotQuery)
+
+ for {
+ it, found := index.Find(q)
+ if !found {
+ break
+ }
+ q.Pivot = iterator(it.GetValueItem())
+ }
+}
diff --git a/pkg/storage/filesystem/unstructured/btree/utils_test.go b/pkg/storage/filesystem/unstructured/btree/utils_test.go
new file mode 100644
index 00000000..51a18a49
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/btree/utils_test.go
@@ -0,0 +1,94 @@
+package btree
+
+import (
+ "reflect"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+func TestListUnique(t *testing.T) {
+ allItems := []string{"bar:aa", "foo:aa:bb", "foo:aa:cc", "foo:aa:cc:dd", "foo:aaaa:bla", "foo:bb:cc", "foo:bb:dd", "foo:dd:ee", "xyz:foo"}
+ tests := []struct {
+ items []string
+ prefix string
+ withEndingSep bool
+ want []string
+ }{
+ // Note the difference between these examples:
+ {
+ items: allItems,
+ prefix: "foo:",
+ withEndingSep: true,
+ want: []string{"foo:aa:bb", "foo:aaaa:bla", "foo:bb:cc", "foo:dd:ee"},
+ },
+ {
+ items: allItems,
+ prefix: "foo:",
+ withEndingSep: false,
+ want: []string{"foo:aa:bb", "foo:bb:cc", "foo:dd:ee"},
+ },
+ }
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ i := newIndex(nil)
+ for _, item := range tt.items {
+ i.Put(NewStringStringItem("", item, ""))
+ }
+
+ endingSep := ""
+ if tt.withEndingSep {
+ endingSep = ":"
+ }
+
+ got := make([]string, 0, len(tt.want))
+ ListUnique(i, tt.prefix, func(it ValueItem) string {
+ str := it.GetValueItem().String()
+ got = append(got, str)
+ return strings.Split(strings.TrimPrefix(str, tt.prefix), ":")[0] + endingSep
+ })
+
+ if !reflect.DeepEqual(got, tt.want) {
+ t.Errorf("TestListUnique() got = %v, want %v", got, tt.want)
+ }
+ })
+ }
+}
+
+func TestGetValueString(t *testing.T) {
+ tests := []struct {
+ key string
+ value string
+ search string
+ want string
+ found bool
+ }{
+ {
+ key: "foo:bar",
+ value: "hello",
+ search: "foo:bar",
+ want: "hello",
+ found: true,
+ },
+ {
+ key: "foo:bar",
+ value: "hello",
+ search: "notfound",
+ want: "",
+ found: false,
+ },
+ }
+ for i, tt := range tests {
+ t.Run(strconv.Itoa(i), func(t *testing.T) {
+ i := newIndex(nil)
+ i.Put(NewStringStringItem("", tt.key, tt.value))
+ got, found := GetValueString(i, PrefixQuery(tt.search))
+ if got != tt.want {
+ t.Errorf("GetValueString() = %v, want %v", got, tt.want)
+ }
+ if found != tt.found {
+ t.Errorf("GetValueString() = %v, want %v", found, tt.found)
+ }
+ })
+ }
+}
diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go
new file mode 100644
index 00000000..53a64bf6
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/event/storage.go
@@ -0,0 +1,357 @@
+package unstructuredevent
+
+import (
+ "context"
+ "fmt"
+ gosync "sync"
+
+ "github.com/sirupsen/logrus"
+ "github.com/weaveworks/libgitops/pkg/frame"
+ "github.com/weaveworks/libgitops/pkg/storage"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "github.com/weaveworks/libgitops/pkg/storage/event"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents/inotify"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured"
+ "github.com/weaveworks/libgitops/pkg/util/sync"
+ "k8s.io/apimachinery/pkg/watch"
+)
+
+// Storage is a union of unstructured.Storage and fileevents.Storage.
+//
+// When the Sync() function is run, the ObjectEvents that are emitted to the
+// listening channels will have ObjectEvent.Type == ObjectEventSync.
+type Storage interface {
+ unstructured.Storage
+ fileevents.Storage
+}
+
+const defaultEventsBufferSize = 4096
+
+// NewManifest is a high-level constructor for a generic
+// unstructured.FileFinder and filesystem.Storage, together with an
+// inotify FileWatcher; all combined into an unstructuredevent.Storage.
+func NewManifest(
+ dir string,
+ contentTyper filesystem.ContentTyper,
+ namespacer storage.Namespacer,
+ recognizer unstructured.ObjectRecognizer,
+ pathExcluder filesystem.PathExcluder,
+) (Storage, error) {
+ fs := filesystem.NewOSFilesystem(dir)
+ fileFinder := unstructured.NewGenericFileFinder(contentTyper, fs)
+ fsRaw, err := filesystem.NewGeneric(fileFinder, namespacer)
+ if err != nil {
+ return nil, err
+ }
+ emitter, err := inotify.NewFileWatcher(dir, &inotify.FileWatcherOptions{
+ PathExcluder: pathExcluder,
+ })
+ if err != nil {
+ return nil, err
+ }
+ unstructuredRaw, err := unstructured.NewGeneric(fsRaw, recognizer, pathExcluder, frame.DefaultFactory())
+ if err != nil {
+ return nil, err
+ }
+ return NewGeneric(unstructuredRaw, emitter, GenericStorageOptions{
+ SyncAtStart: true,
+ EmitSyncEvent: true,
+ })
+}
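+
+// A hypothetical call, assuming namespacer and recognizer come from the
+// caller's scheme setup:
+//
+//   s, err := NewManifest("/tmp/manifests", filesystem.DefaultContentTyper,
+//       namespacer, recognizer, filesystem.DefaultPathExcluders())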
+
+// NewGeneric constructs an extended Storage implementation, which,
+// together with the provided ObjectRecognizer and FileEventsEmitter, listens for
+// file events, keeps the mappings of the unstructured.Storage's unstructured.FileFinder
+// in sync, and sends high-level ObjectEvents upstream.
+func NewGeneric(
+ s unstructured.Storage,
+ emitter fileevents.Emitter,
+ opts GenericStorageOptions,
+) (Storage, error) {
+ return &Generic{
+ Storage: s,
+ emitter: emitter,
+
+ inbound: make(fileevents.FileEventStream, defaultEventsBufferSize),
+ // outbound set by WatchForObjectEvents
+ outboundMu: &gosync.Mutex{},
+
+ // monitor set by WatchForObjectEvents, guarded by outboundMu
+
+ opts: opts,
+ }, nil
+}
+
+type GenericStorageOptions struct {
+ // When Sync(ctx) is run, emit a "SYNC" event to the listening channel
+ // Default: false
+ EmitSyncEvent bool
+ // Do a full re-sync at startup of the watcher
+ // Default: true
+ SyncAtStart bool
+}
+
+// Generic implements unstructuredevent.Storage.
+var _ Storage = &Generic{}
+
+// Generic is an extended Storage implementation which, together with the
+// underlying unstructured.Storage and the provided fileevents.Emitter, listens for
+// file events, keeps the mappings of the unstructured.Storage's unstructured.FileFinder
+// in sync, and sends high-level ObjectEvents upstream.
+//
+// This implementation does not support different VersionRefs, but always stays on
+// the "zero value" "" branch. TODO: Support multiple VersionRefs.
+type Generic struct {
+ unstructured.Storage
+ // the filesystem events emitter
+ emitter fileevents.Emitter
+
+ // channels
+ inbound fileevents.FileEventStream
+ outbound event.ObjectEventStream
+ outboundMu *gosync.Mutex
+
+ // goroutine
+ monitor *sync.Monitor
+
+ // opts
+ opts GenericStorageOptions
+}
+
+func (s *Generic) FileEventsEmitter() fileevents.Emitter {
+ return s.emitter
+}
+
+func (s *Generic) WatchForObjectEvents(ctx context.Context, into event.ObjectEventStream) error {
+ s.outboundMu.Lock()
+ defer s.outboundMu.Unlock()
+ // We don't support more than one listener
+ // TODO: maybe support many listeners in the future?
+ if s.outbound != nil {
+ return fmt.Errorf("WatchStorage: not more than one watch supported: %w", fileevents.ErrTooManyWatches)
+ }
+ // Hook up our inbound channel to the emitter, to make the pipeline functional
+ if err := s.emitter.WatchForFileEvents(ctx, s.inbound); err != nil {
+ return err
+ }
+ // Set outbound at this stage so Sync possibly can send events.
+ s.outbound = into
+ // Start the backing goroutines
+ s.monitor = sync.RunMonitor(s.monitorFunc)
+
+ // Do a full sync in the beginning only if asked. Be aware that without running a Sync
+ // at all before events start happening, the reporting might not work as it should
+ if s.opts.SyncAtStart {
+ // Disregard the changed files at Sync.
+ if _, _, err := s.Sync(ctx); err != nil {
+ return err
+ }
+ }
+ return nil // all ok
+}
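+
+// A consumption sketch (assuming event.ObjectEventStream is a channel of
+// *event.ObjectEvent, as sendEvent below suggests):
+//
+//   events := make(event.ObjectEventStream, defaultEventsBufferSize)
+//   if err := s.WatchForObjectEvents(ctx, events); err != nil {
+//       return err
+//   }
+//   for ev := range events {
+//       fmt.Println(ev.ID, ev.Type)
+//   }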
+
+// Sync extends the underlying unstructured.Storage.Sync(), but optionally also
+// sends special "SYNC" and "ERROR" events upstream for the IDs in the returned
+// "successful" and "duplicates" sets, respectively.
+func (s *Generic) Sync(ctx context.Context) (successful, duplicates core.UnversionedObjectIDSet, err error) {
+ // Sync the underlying UnstructuredStorage, and see what files had changed since last sync
+ successful, duplicates, err = s.Storage.Sync(ctx)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // Send special "sync" or "error" events for each of the changed objects, if configured
+ if s.opts.EmitSyncEvent {
+ _ = successful.ForEach(func(id core.UnversionedObjectID) error {
+ // Send a special "sync" event for this ObjectID to the events channel
+ s.sendEvent(event.ObjectEventSync, id)
+ return nil
+ })
+ _ = duplicates.ForEach(func(id core.UnversionedObjectID) error {
+ // Send an error upstream for the duplicate
+ // TODO: Struct error
+ s.sendError(id, fmt.Errorf("%w: %s", unstructured.ErrTrackingDuplicate, id))
+ return nil
+ })
+ }
+
+ return
+}
+
+// Write writes the given content to the resource indicated by the ID.
+// Error returns are implementation-specific.
+func (s *Generic) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error {
+ // Get the path and verify namespacing info
+ p, err := s.getPath(ctx, id)
+ if err != nil {
+ return err
+ }
+ // Suspend the write event
+ s.emitter.Suspend(ctx, p)
+ // Call the underlying filesystem.Storage
+ return s.Storage.Write(ctx, id, content)
+}
+
+// Delete deletes the resource indicated by the ID.
+// If the resource does not exist, it returns ErrNotFound.
+func (s *Generic) Delete(ctx context.Context, id core.UnversionedObjectID) error {
+ // Get the path and verify namespacing info
+ p, err := s.getPath(ctx, id)
+ if err != nil {
+ return err
+ }
+ // Suspend the delete event
+ s.emitter.Suspend(ctx, p)
+ // Call the underlying filesystem.Storage
+ return s.Storage.Delete(ctx, id)
+}
+
+func (s *Generic) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) {
+ // Verify namespacing info
+ if err := storage.VerifyNamespaced(s.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace); err != nil {
+ return "", err
+ }
+ // Get the path
+ return s.FileFinder().ObjectPath(ctx, id)
+}
+
+func (s *Generic) Close() error {
+ err := s.emitter.Close()
+ // No need to check the error here
+ _ = s.monitor.Wait()
+ return err
+}
+
+func (s *Generic) monitorFunc() error {
+ logrus.Debug("WatchStorage: Monitoring thread started")
+ defer logrus.Debug("WatchStorage: Monitoring thread stopped")
+
+ ctx := context.Background()
+
+ for {
+ // TODO: handle context cancellations, i.e. ctx.Done()
+ ev, ok := <-s.inbound
+ if !ok {
+ logrus.Error("WatchStorage: Fatal: Got non-ok response from watcher.GetFileEventStream()")
+ return nil
+ }
+
+ logrus.Tracef("WatchStorage: Processing event: %s", ev.Type)
+
+ // Skip the file if it has an invalid path
+ if !filesystem.IsValidFileInFilesystem(
+ ctx,
+ s.FileFinder().Filesystem(),
+ s.FileFinder().ContentTyper(),
+ s.PathExcluder(),
+ ev.Path) {
+ logrus.Tracef("WatchStorage: Skipping file %q as it is ignored by the ContentTyper/PathExcluder", ev.Path)
+ continue
+ }
+
+ var err error
+ switch ev.Type {
+ // FileEventModify is also sent for newly-created files
+ case fileevents.FileEventModify, fileevents.FileEventMove:
+ err = s.handleModifyMove(ctx, ev)
+ case fileevents.FileEventDelete:
+ err = s.handleDelete(ctx, ev)
+ default:
+ err = fmt.Errorf("cannot handle update of type %v for path %q", ev.Type, ev.Path)
+ }
+ if err != nil {
+ logrus.Errorf("WatchStorage: %v", err)
+ }
+ }
+}
+
+func (s *Generic) handleDelete(ctx context.Context, ev *fileevents.FileEvent) error {
+ // Delete the given path from the FileFinder; loop through the deleted objects
+ return s.UnstructuredFileFinder().DeleteMapping(ctx, ev.Path).ForEach(func(id core.UnversionedObjectID) error {
+ // Send the delete event to the channel
+ s.sendEvent(event.ObjectEventDelete, id)
+ return nil
+ })
+}
+
+func (s *Generic) handleModifyMove(ctx context.Context, ev *fileevents.FileEvent) error {
+ fileFinder := s.UnstructuredFileFinder()
+
+ // If the file was moved, move the cached mapping(s) too
+ if ev.Type == fileevents.FileEventMove {
+ // There's no need to check whether this move actually was performed:
+ // if OldPath did not exist previously, the code below will just treat
+ // it as a Create.
+ _ = fileFinder.MoveFile(ctx, ev.OldPath, ev.Path)
+ }
+
+ // Recognize the contents of the file
+ idSet, cp, alreadyCached, err := unstructured.RecognizeIDsInFile(
+ ctx,
+ fileFinder,
+ s.ObjectRecognizer(),
+ s.FrameReaderFactory(),
+ ev.Path,
+ )
+ if err != nil {
+ return err
+ }
+ // If the file is already up-to-date as per the checksum, we're all fine
+ if alreadyCached {
+ return nil
+ }
+
+ // Store this new mapping in the cache
+ added, duplicates, removed := fileFinder.SetMapping(ctx, *cp, idSet)
+
+ // Send added events
+ _ = added.ForEach(func(id core.UnversionedObjectID) error {
+ // Send a create event to the channel
+ s.sendEvent(event.ObjectEventCreate, id)
+ return nil
+ })
+ // Send modify events. Do not mutate idSet unnecessarily.
+ _ = idSet.Copy().
+ DeleteSet(added).
+ DeleteSet(removed).
+ DeleteSet(duplicates).
+ ForEach(func(id core.UnversionedObjectID) error {
+ // Send an update event to the channel
+ s.sendEvent(event.ObjectEventUpdate, id)
+ return nil
+ })
+ // Send removed events
+ _ = removed.ForEach(func(id core.UnversionedObjectID) error {
+ // Send a delete event to the channel
+ s.sendEvent(event.ObjectEventDelete, id)
+ return nil
+ })
+ // Send duplicate error events
+ _ = duplicates.ForEach(func(id core.UnversionedObjectID) error {
+ // Send an error event to the channel
+ s.sendError(id, fmt.Errorf("%w: %q, %s", unstructured.ErrTrackingDuplicate, ev.Path, id))
+ return nil
+ })
+
+ return nil
+}
+
+func (s *Generic) sendEvent(eventType event.ObjectEventType, id core.UnversionedObjectID) {
+ logrus.Tracef("Generic: Sending event: %v", eventType)
+ s.outbound <- &event.ObjectEvent{
+ ID: id,
+ Type: eventType,
+ }
+}
+
+func (s *Generic) sendError(id core.UnversionedObjectID, err error) {
+ logrus.Tracef("Generic: Sending error event for %s: %v", id, err)
+ s.outbound <- &event.ObjectEvent{
+ ID: id,
+ Type: event.ObjectEventError,
+ Error: err,
+ }
+}
diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go
new file mode 100644
index 00000000..4f900932
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go
@@ -0,0 +1,511 @@
+package unstructured
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+
+ "github.com/sirupsen/logrus"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/btree"
+ utilerrs "k8s.io/apimachinery/pkg/util/errors"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+var (
+ // ErrNotTracked is returned when the requested resource wasn't found.
+ ErrNotTracked = errors.New("untracked object")
+ // ErrTrackingDuplicate is returned when two duplicate object IDs have been observed in the cache
+ ErrTrackingDuplicate = errors.New("duplicate object ID; already exists in another file")
+)
+
+// GenericFileFinder implements FileFinder.
+var _ FileFinder = &GenericFileFinder{}
+
+// NewGenericFileFinder creates a new instance of GenericFileFinder,
+// which implements the FileFinder interface. The contentTyper is optional;
+// by default filesystem.DefaultContentTyper will be used.
+func NewGenericFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.Filesystem) FileFinder {
+ if contentTyper == nil {
+ contentTyper = filesystem.DefaultContentTyper
+ }
+ if fs == nil {
+ panic("NewGenericFileFinder: fs is mandatory")
+ }
+ return &GenericFileFinder{
+ contentTyper: contentTyper,
+ fs: fs,
+ index: btree.NewVersionedIndex(),
+ mu: &sync.RWMutex{},
+ }
+}
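+
+// A minimal construction sketch (nil contentTyper falls back to
+// filesystem.DefaultContentTyper):
+//
+//   fs := filesystem.NewOSFilesystem("/tmp/manifests")
+//   ff := NewGenericFileFinder(nil, fs)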
+
+// GenericFileFinder is a generic implementation of FileFinder.
+// It uses a ContentTyper to identify what content type a file uses.
+//
+// This implementation relies on all information about what files exist
+// being fed through {Set,Reset}Mapping. If a file or ID is requested that doesn't
+// exist in the internal cache, ErrNotTracked will be returned.
+//
+// Hence, this implementation does not at the moment support creating net-new
+// Objects without someone calling SetMapping() first.
+type GenericFileFinder struct {
+ // Default: DefaultContentTyper
+ contentTyper filesystem.ContentTyper
+ fs filesystem.Filesystem
+
+ index btree.VersionedIndex
+ // mu guards index
+ mu *sync.RWMutex
+}
+
+func (f *GenericFileFinder) Filesystem() filesystem.Filesystem {
+ return f.fs
+}
+
+func (f *GenericFileFinder) ContentTyper() filesystem.ContentTyper {
+ return f.contentTyper
+}
+
+func (f *GenericFileFinder) versionedIndex(ctx context.Context) (btree.Index, error) {
+ ref := f.Filesystem().RefResolver().GetRef(ctx)
+
+ i, ok := f.index.VersionedTree(ref.Branch())
+ if ok {
+ return i, nil
+ }
+ return nil, fmt.Errorf("no such versionref registered: %v", ref)
+}
+
+// ObjectPath gets the file path relative to the root directory
+func (f *GenericFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) {
+ // Lock for reading
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+
+ // Get the versioned tree for the context
+ index, err := f.versionedIndex(ctx)
+ if err != nil {
+ return "", err
+ }
+
+ // Lookup the BTree item for the given ID
+ p, ok := index.Get(queryObject(id))
+ if !ok {
+ return "", utilerrs.NewAggregate([]error{ErrNotTracked, core.NewErrNotFound(id)})
+ }
+ // Return the path
+ return p.GetValueItem().Value().(string), nil
+}
+
+// ObjectsAt retrieves the ObjectIDs in the file with the given relative file path.
+func (f *GenericFileFinder) ObjectsAt(ctx context.Context, path string) (core.UnversionedObjectIDSet, error) {
+ // Lock for reading
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+
+ // Get the versioned tree for the context
+ index, err := f.versionedIndex(ctx)
+ if err != nil {
+ return nil, err
+ }
+ idSet := f.objectsAt(index, path)
+ // Error if there is no such known path
+ if idSet.Len() == 0 {
+ // TODO: Support "creation" of Objects easier, in a generic way through an interface, e.g.
+ // NewObjectPlacer?
+ return nil, fmt.Errorf("%q: %w", path, ErrNotTracked)
+ }
+ return idSet, nil
+}
+
+func (f *GenericFileFinder) objectsAt(index btree.Index, path string) core.UnversionedObjectIDSet {
+ // Traverse the objects belonging to the given path index
+ ids := core.NewUnversionedObjectIDSet()
+ index.List(queryPath(path), func(it btree.Item) bool {
+ // Insert each objectID belonging to that path into the set
+ ids.Insert(it.GetValueItem().Key().(core.UnversionedObjectID))
+ return true
+ })
+ return ids
+}
+
+// ListGroupKinds returns all known GroupKinds by the implementation at that
+// time. The set might vary over time as data is created and deleted; and
+// should not be treated as a universal "what types could possibly exist",
+// but more generally, "what are the GroupKinds of the objects that currently
+// exist"? However, obviously, specific implementations might honor this
+// guideline differently. This might be used for introspection into the system.
+func (f *GenericFileFinder) ListGroupKinds(ctx context.Context) ([]core.GroupKind, error) {
+ // Lock for reading
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+
+ // Get the versioned tree for the context
+ index, err := f.versionedIndex(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ gks := []core.GroupKind{}
+ // List GroupKinds directly under "id:*"
+ prefix := idField + ":"
+ // Extract the GroupKind from the visited item, and return the GroupKind prefix, so it
+ // won't be visited again
+ btree.ListUnique(index, prefix, func(it btree.ValueItem) string {
+ gk := it.Key().(core.UnversionedObjectID).GroupKind()
+ gks = append(gks, gk)
+ return gk.String() + ":" // note: important to return this, see btree/utils_test.go why
+ })
+ return gks, nil
+}
+
+// ListNamespaces lists the available namespaces for the given GroupKind.
+// This function shall only be called for namespaced objects, it is up to
+// the caller to make sure they do not call this method for root-spaced
+// objects. If any of the given rules are violated, ErrNamespacedMismatch
+// should be returned as a wrapped error.
+//
+// The implementer can choose between basing the answer strictly on e.g.
+// v1.Namespace objects that exist in the system, or just the set of
+// different namespaces that have been set on any object belonging to
+// the given GroupKind.
+func (f *GenericFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) {
+ // Lock for reading
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+
+ // Get the versioned tree for the context
+ index, err := f.versionedIndex(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ nsSet := sets.NewString()
+ // List namespaces under "id:{groupkind}:*"
+ prefix := idForGroupKind(gk)
+ // Extract the namespace from the visited item, and return the namespace prefix, so it
+ // won't be visited again
+ btree.ListUnique(index, prefix, func(it btree.ValueItem) string {
+ ns := it.Key().(core.UnversionedObjectID).ObjectKey().Namespace
+ nsSet.Insert(ns)
+ return ns + ":" // note: important to return this, see btree/utils_test.go why
+ })
+ return nsSet, nil
+}
+
+// ListObjectIDs returns a list of unversioned ObjectIDs.
+// For namespaced GroupKinds, the caller must provide a namespace, and for
+// root-spaced GroupKinds, the caller must not. When namespaced, this function
+// must only return object IDs for that given namespace. If any of the given
+// rules are violated, ErrNamespacedMismatch should be returned as a wrapped error.
+func (f *GenericFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) {
+ // Lock for reading
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+
+ // Get the versioned tree for the context
+ index, err := f.versionedIndex(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ ids := core.NewUnversionedObjectIDSet()
+ // List ObjectIDs under "id:{groupkind}:{ns}:*"
+ index.List(queryNamespace(gk, namespace), func(it btree.Item) bool {
+ ids.Insert(it.GetValueItem().Key().(core.UnversionedObjectID))
+ return true
+ })
+ return ids, nil
+}
+
+// ChecksumForPath retrieves the latest known checksum for the given path.
+func (f *GenericFileFinder) ChecksumForPath(ctx context.Context, path string) (string, bool) {
+ // Lock for reading
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+
+ // Get the versioned tree for the context
+ index, err := f.versionedIndex(ctx)
+ if err != nil {
+ return "", false
+ }
+ return btree.GetValueString(index, queryChecksum(path))
+}
+
+// MoveFile moves an internal mapping from oldPath to newPath. moved == true if the oldPath
+// existed and hence the move was performed.
+func (f *GenericFileFinder) MoveFile(ctx context.Context, oldPath, newPath string) bool {
+ // Lock for writing
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ // Get the versioned tree for the context
+ index, err := f.versionedIndex(ctx)
+ if err != nil {
+ logrus.Debugf("MoveFile %s -> %s: got error from versionedIndex: %v", oldPath, newPath, err)
+ return false
+ }
+
+ // Get all the ObjectIDs assigned to the old path
+ idSet := f.objectsAt(index, oldPath)
+ logrus.Tracef("MoveFile: idSet: %s", idSet)
+
+ // Re-assign the IDs to the new path
+ _ = idSet.ForEach(func(id core.UnversionedObjectID) error {
+ index.Put(newIDItem(id, newPath))
+ return nil
+ })
+
+ // Move the checksum info over by
+ // a) getting the checksum for the old path
+ // b) assigning that checksum to the new path
+ // c) deleting the item for the old path
+ checksum, ok := btree.GetValueString(index, queryChecksum(oldPath))
+ if !ok {
+ logrus.Error("MoveFile: Expected checksum to be available, but wasn't")
+ // If this happens, newPath won't be mapped to any checksum; but nothing worse
+ } else {
+ index.Put(newChecksumItem(newPath, checksum))
+ }
+ index.Delete(queryChecksum(oldPath))
+ logrus.Tracef("MoveFile: Moved checksum from %q to %q", oldPath, newPath)
+
+ return true
+}
+
+// SetMapping sets all the IDs that are stored in this path, for the given, updated checksum.
+// ids must be the exact set of ObjectIDs that are observed at the given path; the previously-stored
+// list will be overwritten. The new checksum will be recorded in the system for this path.
+// The "added" set will record what IDs didn't exist before and were added. "duplicates" are IDs that
+// were technically added, but already existed, mapped to other files in the system. Other files'
+// mappings aren't removed in this function, but no new duplicates are added to this path.
+// Such duplicates are returned instead. "removed" contains the set of IDs that existed
+// previously, but were now removed.
+// If ids is an empty set; all mappings to the given path will be removed, and "removed" will contain
+// all prior mappings. (In fact, this is what DeleteMapping does.)
+//
+// ID sets are computed as follows (none of the sets overlap with each other):
+//
+// {ids} => {added} + {duplicates} + {removed} + {modified}
+//
+// {oldIDs} - {removed} + {added} => {newIDs}
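+//
+// As a worked example: if this path previously mapped to {A, B}, and newIDs
+// is {B, C} where C already maps to a different file, then added = {},
+// duplicates = {C}, removed = {A}, B counts as modified, and the path now
+// maps to {B} only.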
+func (f *GenericFileFinder) SetMapping(ctx context.Context, state ChecksumPath, newIDs core.UnversionedObjectIDSet) (added, duplicates, removed core.UnversionedObjectIDSet) {
+ // Lock for writing
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ // Get the versioned tree for the context
+ index, err := f.versionedIndex(ctx)
+ if err != nil {
+ // The version ref does not exist; still return empty (non-nil) sets
+ added = core.NewUnversionedObjectIDSet()
+ duplicates = core.NewUnversionedObjectIDSet()
+ removed = core.NewUnversionedObjectIDSet()
+ return
+ }
+
+ return f.setIDsAtPath(index, state.Path, state.Checksum, newIDs)
+}
+
+// internal method; does not take the mutex; locking is the caller's responsibility
+func (f *GenericFileFinder) setIDsAtPath(index btree.Index, path, checksum string, newIDs core.UnversionedObjectIDSet) (added, duplicates, removed core.UnversionedObjectIDSet) {
+ // If there are no new ids, delete the checksum mapping
+ if newIDs.Len() == 0 {
+ index.Delete(queryChecksum(path))
+ } else {
+ // Update the checksum.
+ index.Put(newChecksumItem(path, checksum))
+ }
+
+ // Get the old IDs; and compute the different "buckets"
+ oldIDs := f.objectsAt(index, path)
+ logrus.Tracef("setIDsAtPath: oldIDs: %s", oldIDs)
+ // Get newID entries that are not present in oldIDs
+ added = newIDs.Difference(oldIDs)
+ logrus.Tracef("setIDsAtPath: added: %s", added)
+
+ duplicates = core.NewUnversionedObjectIDSet()
+
+ // Get oldIDs entries that are not present in newIDs
+ removed = oldIDs.Difference(newIDs)
+ logrus.Tracef("setIDsAtPath: removed: %s", removed)
+
+ // Register the added items
+ _ = added.ForEach(func(addedID core.UnversionedObjectID) error {
+ itemToAdd := newIDItem(addedID, path)
+ // Check if this ID already exists in some other file. TODO: Is the second check needed?
+ if otherFile, _ := btree.GetValueString(index, itemToAdd); len(otherFile) != 0 && otherFile != path {
+ // If so; it is a duplicate; move it to duplicates
+ added.Delete(addedID)
+ duplicates.Insert(addedID)
+ return nil
+ }
+ // If it didn't exist somewhere else, add the mapping between this ID and path
+ index.Put(itemToAdd)
+ return nil
+ })
+
+ logrus.Tracef("setIDsAtPath: added post-filter: %s", added)
+ logrus.Tracef("setIDsAtPath: duplicates post-filter: %s", duplicates)
+
+ // Remove the removed items
+ _ = removed.ForEach(func(removedID core.UnversionedObjectID) error {
+ index.Delete(queryObject(removedID))
+ return nil
+ })
+
+ // return the different buckets
+ return added, duplicates, removed
+}
+
+// DeleteMapping removes a mapping for a given path to a file. Previously-stored IDs are returned.
+func (f *GenericFileFinder) DeleteMapping(ctx context.Context, path string) (removed core.UnversionedObjectIDSet) {
+ // Lock for writing
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ // Get the versioned tree for the context
+ index, err := f.versionedIndex(ctx)
+ if err != nil {
+ // The version ref does not exist; still return an empty (non-nil) set
+ removed = core.NewUnversionedObjectIDSet()
+ return
+ }
+
+ // Re-use the internal setIDsAtPath function
+ _, _, removed = f.setIDsAtPath(
+ index, // Get the versioned index
+ path, // Delete mappings at this path
+ "", // No checksum -> delete that mapping
+ core.NewUnversionedObjectIDSet(), // Empty "desired state" -> everything removed
+ )
+ return
+}
+
+// ResetMappings removes all prior data and sets all given mappings at once.
+// Duplicates are NOT stored in the cache at all for this operation, instead they are returned.
+func (f *GenericFileFinder) ResetMappings(ctx context.Context, m map[ChecksumPath]core.UnversionedObjectIDSet) (duplicates core.UnversionedObjectIDSet) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ // Keep track of all duplicates in the mappings.
+ // Initialize here so an empty (non-nil) set is returned even if the version ref does not exist.
+ duplicates = core.NewUnversionedObjectIDSet()
+
+ // Get the versioned tree for the context
+ index, err := f.versionedIndex(ctx)
+ if err != nil {
+ return
+ }
+ // Completely clean up all existing data on the branch before starting.
+ index.Clear()
+ logrus.Trace("ResetMappings: cleaned branch")
+
+ // Go through all files and add them to the cache
+ for cp, allIDs := range m {
+ // The first "duplicate" entry will succeed in "making it" to the cache; but all the others
+ // will be registered here. After this iteration of set; remove the duplicates completely
+ // from the cache.
+ logrus.Tracef("ResetMappings: cp %v, allIDs: %s", cp, allIDs)
+
+ // Re-use the internal setIDsAtPath function again.
+ // We don't need added & removed here, as we know that {allIDs} = {added} + {newDuplicates}.
+ // removed is always empty, as we cleaned all mappings before we started this method.
+ _, newDuplicates, _ := f.setIDsAtPath(index, cp.Path, cp.Checksum, allIDs)
+ logrus.Tracef("ResetMappings: newDuplicates: %s", newDuplicates)
+ // Add all duplicates together so we can process them later
+ duplicates.InsertSet(newDuplicates)
+ }
+
+ logrus.Tracef("ResetMappings: total duplicates: %s", duplicates)
+
+ // Go and "fix up" (i.e. delete) the duplicates that were wrongly added previously
+ // In the resulting mappings; no duplicates are allowed (to avoid "races" at random
+ // between different duplicates otherwise)
+ _ = duplicates.ForEach(func(id core.UnversionedObjectID) error {
+ index.Delete(queryObject(id))
+ return nil
+ })
+
+ return
+}
+
+// RegisterVersionRef registers a new "head" version ref, based (using
+// copy-on-write logic) on the existing version ref "base". head must be
+// non-nil, but base can be nil if it is desired that "head" has no parent,
+// and hence starts out blank. An error is returned if head is nil, or base
+// does not exist.
+func (f *GenericFileFinder) RegisterVersionRef(head, base core.VersionRef) error {
+ if head == nil {
+ return fmt.Errorf("head must not be nil")
+ }
+ baseBranch := ""
+ if base != nil {
+ baseBranch = base.Branch()
+ }
+ _, err := f.index.NewVersionedTree(head.Branch(), baseBranch)
+ return err
+}
+
+// HasVersionRef returns true if the given head version ref has been registered.
+func (f *GenericFileFinder) HasVersionRef(head core.VersionRef) bool {
+ _, ok := f.index.VersionedTree(head.Branch())
+ return ok
+}
+
+// DeleteVersionRef deletes the given head version ref.
+func (f *GenericFileFinder) DeleteVersionRef(head core.VersionRef) {
+ f.index.DeleteVersionedTree(head.Branch())
+}
+
+func idForGroupKind(gk core.GroupKind) string { return idField + ":" + gk.String() + ":" }
+func idForNamespace(gk core.GroupKind, ns string) string { return idForGroupKind(gk) + ns + ":" }
+func queryNamespace(gk core.GroupKind, ns string) btree.ItemQuery {
+ return btree.PrefixQuery(idForNamespace(gk, ns))
+}
+
+func idForObject(id core.UnversionedObjectID) string {
+ return idForNamespace(id.GroupKind(), id.ObjectKey().Namespace) + id.ObjectKey().Name
+}
+func queryObject(id core.UnversionedObjectID) btree.ItemQuery {
+ return btree.PrefixQuery(idForObject(id))
+}
+
+func queryPath(path string) btree.ItemQuery { return btree.PrefixQuery(pathIdxField + ":" + path) }
+func queryChecksum(path string) btree.ItemQuery { return btree.PrefixQuery(checksumField + ":" + path) }
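+
+// As an illustration (assuming core.GroupKind renders as "Kind.group"), a
+// namespaced object "default/foo" of kind Car in group "sample.org" yields
+// index entries of the following shape:
+//
+//   id:Car.sample.org:default:foo -> the file path (value item)
+//   path:<path>                   -> an indexed pointer back to the id item
+//   chk:<path>                    -> the file's checksum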
+
+func newChecksumItem(path, checksum string) btree.ValueItem {
+ return btree.NewStringStringItem(checksumField, path, checksum)
+}
+
+func newIDItem(id core.UnversionedObjectID, path string) btree.ValueItem {
+ return &idItemImpl{
+ ItemString: btree.NewItemString(idForObject(id)),
+ id: id,
+ path: path,
+ }
+}
+
+type idItemImpl struct {
+ btree.ItemString
+ id core.UnversionedObjectID
+ path string
+}
+
+const (
+ idField = "id"
+ pathIdxField = "path"
+ checksumField = "chk"
+)
+
+func (i *idItemImpl) GetValueItem() btree.ValueItem { return i }
+func (i *idItemImpl) Key() interface{} { return i.id }
+func (i *idItemImpl) Value() interface{} { return i.path }
+
+func (i *idItemImpl) IndexedPtrs() []btree.Item {
+ var self btree.ValueItem = i
+ return []btree.Item{
+ btree.NewIndexedPtr(pathIdxField+":"+i.path, &self),
+ }
+}
diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go
new file mode 100644
index 00000000..4943b609
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/interfaces.go
@@ -0,0 +1,112 @@
+package unstructured
+
+import (
+ "context"
+
+ "github.com/weaveworks/libgitops/pkg/frame"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
+)
+
+// Storage is a raw Storage interface that builds on top
+// of filesystem.Storage. It uses an ObjectRecognizer to recognize
+// otherwise unknown objects in unstructured files.
+// The Storage must use an unstructured.FileFinder underneath.
+type Storage interface {
+ filesystem.Storage
+
+ // Sync synchronizes the current state of the filesystem, and overwrites all
+ // previously cached mappings in the unstructured.FileFinder. "successful"
+ // mappings returned are those that are observed to be distinct. "duplicates"
+ // contains such IDs that weren't distinct; but existed in multiple files.
+ Sync(ctx context.Context) (successful, duplicates core.UnversionedObjectIDSet, err error)
+
+ // ObjectRecognizer returns the underlying ObjectRecognizer used.
+ ObjectRecognizer() ObjectRecognizer
+ // FrameReaderFactory returns the underlying FrameReaderFactory used.
+ FrameReaderFactory() frame.ReaderFactory
+ // PathExcluder specifies what paths to not sync. Can possibly be nil.
+ PathExcluder() filesystem.PathExcluder
+ // UnstructuredFileFinder returns the underlying unstructured.FileFinder used.
+ UnstructuredFileFinder() FileFinder
+}
+
+// ObjectRecognizer recognizes objects stored in files.
+type ObjectRecognizer interface {
+ // RecognizeObjectIDs returns the ObjectIDs present in the file with the given name,
+ // content type and content (in the FrameReader).
+ RecognizeObjectIDs(fileName string, fr frame.Reader) ([]core.ObjectID, error)
+}
+
+// FileFinder is an extension to filesystem.FileFinder that allows it to have an internal
+// cache with mappings between an UnversionedObjectID and a ChecksumPath. This allows
+// higher-order interfaces to manage Objects in files in an unorganized directory
+// (e.g. a Git repo).
+//
+// This implementation supports multiple IDs per file, and can deal with duplicate IDs across
+// distinct file paths. This implementation supports looking at the context for VersionRef info.
+type FileFinder interface {
+ filesystem.FileFinder
+
+ // SetMapping sets all the IDs that are stored in this path, for the given, updated checksum.
+ // ids must be the exact set of ObjectIDs that are observed at the given path; the previously-stored
+ // list will be overwritten. The new checksum will be recorded in the system for this path.
+ // The "added" set will record what IDs didn't exist before and were added. "duplicates" are IDs that
+ // were technically added, but already existed, mapped to other files in the system. Other files'
+ // mappings aren't removed in this function, but no new duplicates are added to this path.
+ // Such duplicates are returned instead. "removed" contains the set of IDs that existed
+ // previously, but were now removed.
+ // If ids is an empty set; all mappings to the given path will be removed, and "removed" will contain
+ // all prior mappings. (In fact, this is what DeleteMapping does.)
+ //
+ // ID sets are computed as follows (none of the sets overlap with each other):
+ //
+ // {ids} => {added} + {duplicates} + {removed} + {modified}
+ //
+ // {oldIDs} - {removed} + {added} => {newIDs}
+ SetMapping(ctx context.Context, state ChecksumPath, ids core.UnversionedObjectIDSet) (added, duplicates, removed core.UnversionedObjectIDSet)
+
+ // ResetMappings removes all prior data and sets all given mappings at once.
+ // Duplicates are NOT stored in the cache at all for this operation, instead they are returned.
+ ResetMappings(ctx context.Context, mappings map[ChecksumPath]core.UnversionedObjectIDSet) (duplicates core.UnversionedObjectIDSet)
+
+ // DeleteMapping removes a mapping for a given path to a file. Previously-stored IDs are returned.
+ DeleteMapping(ctx context.Context, path string) (removed core.UnversionedObjectIDSet)
+
+ // ChecksumForPath retrieves the latest known checksum for the given path.
+ ChecksumForPath(ctx context.Context, path string) (string, bool)
+
+ // MoveFile moves an internal mapping from oldPath to newPath. moved == true if the oldPath
+ // existed and hence the move was performed.
+ MoveFile(ctx context.Context, oldPath, newPath string) (moved bool)
+
+ // RegisterVersionRef registers a new "head" version ref, based (using
+ // copy-on-write logic) on the existing version ref "base". head must be
+ // non-nil, but base can be nil if it is desired that "head" has no parent,
+ // and hence starts out blank. An error is returned if head is nil, or base
+ // does not exist.
+ RegisterVersionRef(head commit.Ref, base commit.Ref) error
+ // HasVersionRef returns true if the given head version ref has been registered.
+ HasVersionRef(head commit.Ref) bool
+ // DeleteVersionRef deletes the given head version ref.
+ DeleteVersionRef(head commit.Ref)
+}
+
+// ChecksumPath is a tuple of a given Checksum and relative file Path,
+// for use in unstructured.FileFinder.
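+//
+// For example (a sketch, using the file's modification time as the
+// checksum):
+//
+//   ChecksumPath{Path: "cars/foo.yaml", Checksum: "1612345678"}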
+type ChecksumPath struct {
+ // Checksum is the checksum of the file at the given path.
+ //
+ // What the checksum is exactly is application-dependent; however, it
+ // should be the same for two invocations, as long as the stored
+ // data is the same. It might change over time although the
+ // underlying data did not. Examples of checksums that can be
+ // used are: the file modification timestamp, a sha256sum of the
+ // file content, or the latest Git commit when the file was
+ // changed.
+ //
+ // The checksum is calculated by the filesystem.Filesystem.
+ Checksum string
+ // Path to the file, relative to filesystem.Filesystem.RootDirectory().
+ Path string
+}
diff --git a/pkg/storage/filesystem/unstructured/recognizer.go b/pkg/storage/filesystem/unstructured/recognizer.go
new file mode 100644
index 00000000..de092c5f
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/recognizer.go
@@ -0,0 +1,84 @@
+package unstructured
+
+import (
+ "errors"
+ "fmt"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/frame"
+ "github.com/weaveworks/libgitops/pkg/serializer"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// KubeObjectRecognizer implements ObjectRecognizer.
+var _ ObjectRecognizer = KubeObjectRecognizer{}
+
+// KubeObjectRecognizer is a simple implementation of ObjectRecognizer, that
+// decodes the given (possibly multi-frame) file into a *metav1.PartialObjectMetadata,
+// which allows extracting the ObjectID from any Kube API Machinery-compatible Object.
+//
+// This operation works even though *metav1.PartialObjectMetadata is not registered
+// with the underlying Scheme in any way.
+//
+// This implementation enforces that .apiVersion, .kind and .metadata.name fields are
+// non-empty.
+type KubeObjectRecognizer struct {
+ // Decoder is a required field in order for RecognizeObjectIDs to function.
+ Decoder serializer.Decoder
+ // AllowUnrecognized controls whether this implementation allows recognizing
+ // GVK combinations not known to the underlying Scheme. Default: false
+ AllowUnrecognized bool
+ // AllowDuplicates controls whether this implementation allows two exactly similar
+ // ObjectIDs in the same file. Default: false
+ AllowDuplicates bool
+}
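+
+// For illustration, a two-frame YAML file like the following (sketch) is
+// recognized as two ObjectIDs, provided the GVK is known to the scheme (or
+// AllowUnrecognized is set):
+//
+//   apiVersion: sample.org/v1alpha1
+//   kind: Car
+//   metadata:
+//     name: foo
+//   ---
+//   apiVersion: sample.org/v1alpha1
+//   kind: Car
+//   metadata:
+//     name: bar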
+
+func (r KubeObjectRecognizer) RecognizeObjectIDs(_ string, fr frame.Reader) ([]core.ObjectID, error) {
+ if r.Decoder == nil {
+ return nil, errors.New("programmer error: KubeObjectRecognizer.Decoder is nil")
+ }
+
+ ids := []core.ObjectID{}
+ seen := map[core.ObjectID]struct{}{}
+ for {
+ metaObj := &metav1.PartialObjectMetadata{}
+ err := r.Decoder.DecodeInto(fr, metaObj)
+ if err == io.EOF {
+ // If we encountered io.EOF, we know that all is fine and we can exit the for loop and return
+ break
+ } else if err != nil {
+ return nil, err
+ }
+
+ // Validate the object info
+ gvk := metaObj.GroupVersionKind()
+ if gvk.Group == "" && gvk.Version == "" {
+ return nil, fmt.Errorf(".apiVersion field must not be empty")
+ }
+ if gvk.Kind == "" {
+ return nil, fmt.Errorf(".kind field must not be empty")
+ }
+ if metaObj.Name == "" {
+ return nil, fmt.Errorf(".metadata.name field must not be empty")
+ }
+ if !r.AllowUnrecognized && !r.Decoder.GetLockedScheme().Scheme().Recognizes(gvk) {
+ return nil, fmt.Errorf("GroupVersionKind %v not recognized by the scheme", gvk)
+ }
+
+ // Create the ObjectID
+ id := core.NewObjectID(metaObj.GroupVersionKind(), core.ObjectKeyFromMetav1Object(metaObj))
+ // Check if this has been seen before
+ _, idSeen := seen[id]
+ // If this ID has been seen before, but duplicates are disallowed, error
+ if idSeen && !r.AllowDuplicates {
+ return nil, fmt.Errorf("invalid file: two Objects with the same ID: %s", id)
+ }
+ // Add the ID to the list
+ ids = append(ids, id)
+ // Now this ID has been seen
+ seen[id] = struct{}{}
+ }
+
+ return ids, nil
+}
diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go
new file mode 100644
index 00000000..ac2bdf3b
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/storage.go
@@ -0,0 +1,199 @@
+package unstructured
+
+import (
+ "context"
+ "errors"
+ "fmt"
+
+ "github.com/sirupsen/logrus"
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/frame"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
+)
+
+// NewGeneric creates a new generic unstructured.Storage for the given underlying
+// interfaces. storage and recognizer are mandatory, pathExcluder and framingFactory
+// are optional (can be nil). framingFactory defaults to frame.DefaultFactory().
+func NewGeneric(
+ storage filesystem.Storage,
+ recognizer ObjectRecognizer,
+ pathExcluder filesystem.PathExcluder,
+ framingFactory frame.ReaderFactory,
+) (Storage, error) {
+ if storage == nil {
+ return nil, fmt.Errorf("storage is mandatory")
+ }
+ if recognizer == nil {
+ return nil, fmt.Errorf("recognizer is mandatory")
+ }
+ // optional: use YAML/JSON by default.
+ if framingFactory == nil {
+ framingFactory = frame.DefaultFactory()
+ }
+ fileFinder, ok := storage.FileFinder().(FileFinder)
+ if !ok {
+ return nil, errors.New("the given filesystem.Storage must use a unstructured.FileFinder")
+ }
+ return &Generic{
+ Storage: storage,
+ recognizer: recognizer,
+ fileFinder: fileFinder,
+ pathExcluder: pathExcluder,
+ framingFactory: framingFactory,
+ }, nil
+}
+
+type Generic struct {
+ filesystem.Storage
+ recognizer ObjectRecognizer
+ fileFinder FileFinder
+ pathExcluder filesystem.PathExcluder
+ framingFactory frame.ReaderFactory
+}
+
+// Sync synchronizes the current state of the filesystem, and overwrites all
+// previously cached mappings in the unstructured.FileFinder. "successful"
+// mappings returned are those that are observed to be distinct. "duplicates"
+// contains such IDs that weren't distinct; but existed in multiple files.
+func (s *Generic) Sync(ctx context.Context) (successful, duplicates core.UnversionedObjectIDSet, err error) {
+ fileFinder := s.UnstructuredFileFinder()
+ fs := fileFinder.Filesystem()
+ contentTyper := fileFinder.ContentTyper()
+
+ // Create pre-made empty sets for the "successful" and "duplicate" IDs
+ // This ensures that even if errors occur, we don't return a nil set
+ successful = core.NewUnversionedObjectIDSet()
+ duplicates = core.NewUnversionedObjectIDSet()
+
+ // If the context carries a version ref not yet known to the FileFinder, register it first
+ ref, ok := commit.GetRef(ctx)
+ if ok && !fileFinder.HasVersionRef(ref) {
+ if err = fileFinder.RegisterVersionRef(ref, nil); err != nil {
+ return
+ }
+ }
+
+ // List all valid files in the fs
+ var files []string
+ files, err = filesystem.ListValidFilesInFilesystem(
+ ctx,
+ fs,
+ contentTyper,
+ s.PathExcluder(),
+ )
+ if err != nil {
+ return
+ }
+
+ // Walk all files and fill the mappings of the unstructured.FileFinder.
+ allMappings := make(map[ChecksumPath]core.UnversionedObjectIDSet)
+ objectCount := 0
+
+ for _, filePath := range files {
+ // Recognize the IDs in the given file
+ idSet, cp, _, err := RecognizeIDsInFile(
+ ctx,
+ fileFinder,
+ s.ObjectRecognizer(),
+ s.FrameReaderFactory(),
+ filePath,
+ )
+ if err != nil {
+ logrus.Error(err)
+ continue
+ }
+ objectCount += idSet.Len()
+ allMappings[*cp] = idSet
+ }
+
+ // ResetMappings overwrites all data at once; so these
+ // mappings are now the "truth" about what's on disk
+ // Duplicate mappings are returned from ResetMappings
+ duplicates = fileFinder.ResetMappings(ctx, allMappings)
+ // For each set of IDs; add them to the "successful" batch
+ for _, set := range allMappings {
+ successful.InsertSet(set)
+ }
+ // Remove the duplicates from the successful bucket
+ successful.DeleteSet(duplicates)
+ return
+}
+
+// ObjectRecognizer returns the underlying ObjectRecognizer used.
+func (s *Generic) ObjectRecognizer() ObjectRecognizer {
+ return s.recognizer
+}
+
+// FrameReaderFactory returns the underlying FrameReaderFactory used.
+func (s *Generic) FrameReaderFactory() frame.ReaderFactory {
+ return s.framingFactory
+}
+
+// PathExcluder specifies what paths to not sync
+func (s *Generic) PathExcluder() filesystem.PathExcluder {
+ return s.pathExcluder
+}
+
+// UnstructuredFileFinder returns the underlying unstructured.FileFinder used.
+func (s *Generic) UnstructuredFileFinder() FileFinder {
+ return s.fileFinder
+}
+
+// RecognizeIDsInFile reads the given file, determines its content type, and then
+// recognizes the objects within it. However, if the checksum is already up-to-date,
+// the function returns directly, without reading the file. In that case, the bool is
+// true (in all other cases, false). The ObjectIDSet and ChecksumPath are returned when err == nil.
+func RecognizeIDsInFile(
+ ctx context.Context,
+ fileFinder FileFinder,
+ recognizer ObjectRecognizer,
+ framingFactory frame.ReaderFactory,
+ filePath string,
+) (core.UnversionedObjectIDSet, *ChecksumPath, bool, error) {
+ fs := fileFinder.Filesystem()
+ contentTyper := fileFinder.ContentTyper()
+
+ // Get the current checksum of the file
+ currentChecksum, err := fs.WithContext(ctx).Checksum(filePath)
+ if err != nil {
+ return nil, nil, false, fmt.Errorf("Could not get checksum for file %q: %v", filePath, err)
+ }
+ cp := &ChecksumPath{Path: filePath, Checksum: currentChecksum}
+
+ // Check the cached checksum
+ cachedChecksum, ok := fileFinder.ChecksumForPath(ctx, filePath)
+ if ok && cachedChecksum == currentChecksum {
+ // If the cache is up-to-date, we don't need to do anything
+ logrus.Tracef("Checksum for file %q is up-to-date: %q, skipping...", filePath, currentChecksum)
+ // Just get the IDs that are cached, and done.
+ idSet, err := fileFinder.ObjectsAt(ctx, filePath)
+ if err != nil {
+ return nil, nil, false, err
+ }
+ return idSet, cp, true, nil
+ }
+
+ // If the file is not known to the FileFinder yet, or if the checksum
+ // was empty, read the file, and recognize it.
+ fileContent, err := fs.WithContext(ctx).ReadFile(filePath)
+ if err != nil {
+ return nil, nil, false, fmt.Errorf("Could not read file %q: %v", filePath, err)
+ }
+ // Get the content type for this file so that we can read it properly
+ ct, err := contentTyper.ContentTypeForPath(ctx, fs, filePath)
+ if err != nil {
+ return nil, nil, false, fmt.Errorf("Could not get content type for file %q: %v", filePath, err)
+ }
+ // Create a new FrameReader for the given ContentType and ReadCloser
+ // TODO: Use a recognizing frame.Reader here
+ fr := framingFactory.NewReader(ct, content.FromBytes(fileContent))
+ // Recognize all IDs in the file
+ versionedIDs, err := recognizer.RecognizeObjectIDs(filePath, fr)
+ if err != nil {
+ return nil, nil, false, fmt.Errorf("Could not recognize object IDs in %q: %v", filePath, err)
+ }
+ // Convert to an unversioned set
+ return core.UnversionedObjectIDSetFromVersionedSlice(versionedIDs), cp, false, nil
+}
diff --git a/pkg/storage/filesystem/unstructured/tx/tx.go b/pkg/storage/filesystem/unstructured/tx/tx.go
new file mode 100644
index 00000000..5593ebdf
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/tx/tx.go
@@ -0,0 +1,48 @@
+package unstructuredtx
+
+import (
+ "context"
+
+ "github.com/weaveworks/libgitops/pkg/storage/client"
+ "github.com/weaveworks/libgitops/pkg/storage/client/transactional"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
+ "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured"
+)
+
+// NewUnstructuredStorageTxHandler returns a TransactionHook that, before the transaction
+// starts, informs the unstructured.FileFinder (if one is used) that the new head branch
+// should be created (if it does not already exist), using the base branch as the
+// copy-on-write baseline.
+func NewUnstructuredStorageTxHandler(c client.Client) transactional.TransactionHook {
+ fsStorage, ok := c.BackendReader().Storage().(filesystem.Storage)
+ if !ok {
+ return nil
+ }
+ fileFinder, ok := fsStorage.FileFinder().(unstructured.FileFinder)
+ if !ok {
+ return nil
+ }
+ return &unstructuredStorageTxHandler{fileFinder}
+}
+
+type unstructuredStorageTxHandler struct {
+ fileFinder unstructured.FileFinder
+}
+
+func (h *unstructuredStorageTxHandler) PreTransactionHook(ctx context.Context, info transactional.TxInfo) error {
+ head := core.NewMutableVersionRef(info.Head)
+ if h.fileFinder.HasVersionRef(head) {
+ return nil // head exists, no-op
+ }
+ base := core.NewMutableVersionRef(info.Base)
+ // If both head and base are the same, and we know that head does not exist in the system, we need to create
+ // head "from scratch" as a "root version"
+ if info.Head == info.Base {
+ base = nil
+ }
+ return h.fileFinder.RegisterVersionRef(head, base)
+}
+
+func (h *unstructuredStorageTxHandler) PostTransactionHook(ctx context.Context, info transactional.TxInfo) error {
+ return nil // cleanup?
+}
diff --git a/pkg/storage/format.go b/pkg/storage/format.go
deleted file mode 100644
index 84993ceb..00000000
--- a/pkg/storage/format.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package storage
-
-import "github.com/weaveworks/libgitops/pkg/serializer"
-
-// ContentTypes describes the connection between
-// file extensions and a content types.
-var ContentTypes = map[string]serializer.ContentType{
- ".json": serializer.ContentTypeJSON,
- ".yaml": serializer.ContentTypeYAML,
- ".yml": serializer.ContentTypeYAML,
-}
-
-func extForContentType(wanted serializer.ContentType) string {
- for ext, ct := range ContentTypes {
- if ct == wanted {
- return ext
- }
- }
- return ""
-}
diff --git a/pkg/storage/interfaces.go b/pkg/storage/interfaces.go
new file mode 100644
index 00000000..fe392646
--- /dev/null
+++ b/pkg/storage/interfaces.go
@@ -0,0 +1,124 @@
+package storage
+
+import (
+ "context"
+ "errors"
+
+ "github.com/weaveworks/libgitops/pkg/content"
+ "github.com/weaveworks/libgitops/pkg/storage/commit"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "k8s.io/apimachinery/pkg/util/sets"
+)
+
+var (
+ // ErrNamespacedMismatch is returned by Storage methods if the given UnversionedObjectID
+ // carries invalid data, according to the Namespacer.
+ ErrNamespacedMismatch = errors.New("mismatch between namespacing info for object and the given parameter")
+)
+
+// Storage is a Key-indexed low-level interface to
+// store byte-encoded Objects (resources) in non-volatile
+// memory.
+//
+// This Storage operates entirely on GroupKinds, without enforcing
+// a specific version of the encoded data format. This is possible
+// under the assumption that any older format stored on disk can be
+// read successfully and converted into a more recent version.
+//
+// TODO: Add thread-safety so it is not possible to issue a Write() or Delete()
+// at the same time as any other read operation.
+type Storage interface {
+ Reader
+ Writer
+}
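+
+// A read-modify-write sketch against any Storage s (id being some
+// core.UnversionedObjectID; error handling elided):
+//
+//   data, _ := s.Read(ctx, id)
+//   data = append(data, '\n')
+//   _ = s.Write(ctx, id, data)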
+
+// StorageCommon is an interface containing the methods needed
+// by both Reader and Writer.
+ // RefResolver is able to resolve version references to immutable
+ // commit hashes.
+ RefResolver() commit.RefResolver
+ // Namespacer gives access to the namespacer that is used
+ Namespacer() Namespacer
+ // Exists checks if the resource indicated by the ID exists.
+ Exists(ctx context.Context, id core.UnversionedObjectID) (bool, error)
+}
+
+// Namespacer is an interface that lets the caller know if a GroupKind is namespaced
+// or not. There are two ready-made implementations:
+// 1. kube.RESTMapperToNamespacer
+// 2. NewStaticNamespacer
+type Namespacer interface {
+ // IsNamespaced returns true if the GroupKind is a namespaced type
+ IsNamespaced(gk core.GroupKind) (bool, error)
+}
+
+// Reader provides the read operations for the Storage.
+type Reader interface {
+ StorageCommon
+
+ // Read returns a resource's content based on the ID.
+ // If the resource does not exist, it returns core.NewErrNotFound.
+ Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error)
+
+ // Checksum returns a checksum of the Object with the given ID.
+ //
+ // What the checksum is exactly is application-dependent; however, it
+ // should be the same for two invocations, as long as the stored
+ // data is the same. It might change over time although the
+ // underlying data did not. Examples of checksums that can be
+ // used are: the file modification timestamp, a sha256sum of the
+ // file content, or the latest Git commit when the file was
+ // changed.
+ Checksum(ctx context.Context, id core.UnversionedObjectID) (string, error)
+
+ // ContentType returns the content type that should be used when serializing
+ // the object with the given ID. This operation must function also before the
+ // Object with the given id exists in the system, in order to be able to
+ // create new Objects.
+ ContentType(ctx context.Context, id core.UnversionedObjectID) (content.ContentType, error)
+
+ // List operations
+ Lister
+}
+
+type Lister interface {
+ // ListGroupKinds returns all known GroupKinds by the implementation at that
+ // time. The set might vary over time as data is created and deleted; and
+ // should not be treated as a universal "what types could possibly exist",
+ // but more generally, "what are the GroupKinds of the objects that currently
+ // exist"? However, obviously, specific implementations might honor this
+ // guideline differently. This might be used for introspection into the system.
+ ListGroupKinds(ctx context.Context) ([]core.GroupKind, error)
+
+ // ListNamespaces lists the available namespaces for the given GroupKind.
+ // This function shall only be called for namespaced objects, it is up to
+ // the caller to make sure they do not call this method for root-spaced
+ // objects. If any of the given rules are violated, ErrNamespacedMismatch
+ // should be returned as a wrapped error.
+ //
+ // The implementer can choose between basing the answer strictly on e.g.
+ // v1.Namespace objects that exist in the system, or just the set of
+ // different namespaces that have been set on any object belonging to
+ // the given GroupKind.
+ ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error)
+
+ // ListObjectIDs returns a list of unversioned ObjectIDs.
+ // For namespaced GroupKinds, the caller must provide a namespace, and for
+ // root-spaced GroupKinds, the caller must not. When namespaced, this function
+ // must only return object IDs for that given namespace. If any of the given
+ // rules are violated, ErrNamespacedMismatch should be returned as a wrapped error.
+ ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error)
+}
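+
+// A listing sketch for a Lister r (gk being some namespaced core.GroupKind):
+//
+//   ids, err := r.ListObjectIDs(ctx, gk, "default")
+//   if err != nil {
+//       return err
+//   }
+//   _ = ids.ForEach(func(id core.UnversionedObjectID) error {
+//       fmt.Println(id)
+//       return nil
+//   })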
+
+// Writer provides the write operations for the Storage.
+type Writer interface {
+ StorageCommon
+
+ // Write writes the given content to the resource indicated by the ID.
+ // Error returns are implementation-specific.
+ Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error
+ // Delete deletes the resource indicated by the ID.
+ // If the resource does not exist, it returns ErrNotFound.
+ Delete(ctx context.Context, id core.UnversionedObjectID) error
+}
diff --git a/pkg/storage/key.go b/pkg/storage/key.go
deleted file mode 100644
index 015cac41..00000000
--- a/pkg/storage/key.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package storage
-
-import (
- "github.com/weaveworks/libgitops/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-type kindKey schema.GroupVersionKind
-
-func (gvk kindKey) GetGroup() string { return gvk.Group }
-func (gvk kindKey) GetVersion() string { return gvk.Version }
-func (gvk kindKey) GetKind() string { return gvk.Kind }
-func (gvk kindKey) GetGVK() schema.GroupVersionKind { return schema.GroupVersionKind(gvk) }
-func (gvk kindKey) EqualsGVK(kind KindKey, respectVersion bool) bool {
- // Make sure kind and group match, otherwise return false
- if gvk.GetKind() != kind.GetKind() || gvk.GetGroup() != kind.GetGroup() {
- return false
- }
- // If we allow version mismatches (i.e. don't need to respect the version), return true
- if !respectVersion {
- return true
- }
- // Otherwise, return true if the version also is the same
- return gvk.GetVersion() == kind.GetVersion()
-}
-func (gvk kindKey) String() string { return gvk.GetGVK().String() }
-
-// kindKey implements KindKey.
-var _ KindKey = kindKey{}
-
-type KindKey interface {
- // String implements fmt.Stringer
- String() string
-
- GetGroup() string
- GetVersion() string
- GetKind() string
- GetGVK() schema.GroupVersionKind
-
- EqualsGVK(kind KindKey, respectVersion bool) bool
-}
-
-type ObjectKey interface {
- KindKey
- runtime.Identifyable
-}
-
-// objectKey implements ObjectKey.
-var _ ObjectKey = &objectKey{}
-
-type objectKey struct {
- KindKey
- runtime.Identifyable
-}
-
-func (key objectKey) String() string { return key.KindKey.String() + " " + key.GetIdentifier() }
-
-func NewKindKey(gvk schema.GroupVersionKind) KindKey {
- return kindKey(gvk)
-}
-
-func NewObjectKey(kind KindKey, id runtime.Identifyable) ObjectKey {
- return objectKey{kind, id}
-}
diff --git a/pkg/storage/kube/namespaces.go b/pkg/storage/kube/namespaces.go
new file mode 100644
index 00000000..c32cf77e
--- /dev/null
+++ b/pkg/storage/kube/namespaces.go
@@ -0,0 +1,112 @@
+package kube
+
+import (
+ "sync"
+
+ "github.com/weaveworks/libgitops/pkg/storage"
+ "github.com/weaveworks/libgitops/pkg/storage/backend"
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// TODO: Make an example component that iterates through all of a raw.Storage's
+// or FileFinder's objects, reads them, and converts them into the current
+// hub version.
+
+// TODO: Make a composite Storage that encrypts secrets using a key
+
+// NewNamespaceEnforcer returns a backend.NamespaceEnforcer that
+// enforces namespacing rules (approximately) in the same way as
+// Kubernetes itself does. The following rules are applied:
+//
+// if object is namespaced {
+// if .metadata.namespace == "" {
+// .metadata.namespace = "default"
+// } else { // .metadata.namespace != ""
+// Make sure that such a v1.Namespace object
+// exists in the system.
+// }
+// } else { // object is non-namespaced
+// if .metadata.namespace != "" {
+// .metadata.namespace = ""
+// }
+// }
+//
+// Underneath, backend.GenericNamespaceEnforcer is used. Refer
+// to its documentation if you want slightly different behavior
+// (e.g. accepting any namespace value as valid).
+//
+// TODO: Maybe we want to validate the namespace string itself?
+func NewNamespaceEnforcer() backend.NamespaceEnforcer {
+ return backend.GenericNamespaceEnforcer{
+ DefaultNamespace: metav1.NamespaceDefault,
+ NamespaceGroupKind: &core.GroupKind{
+ Group: "", // legacy name for the core API group
+ Kind: "Namespace",
+ },
+ }
+}
+
+// SimpleRESTMapper is a subset of the meta.RESTMapper interface
+type SimpleRESTMapper interface {
+ // RESTMapping identifies a preferred resource mapping for the provided group kind.
+ RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error)
+}
+
+// RESTMapperToNamespacer implements the Namespacer interface by fetching (and caching) data
+// from the given RESTMapper interface, that is compatible with any meta.RESTMapper implementation.
+// This allows you to e.g. pass in a meta.RESTMapper yielded from
+// sigs.k8s.io/controller-runtime/pkg/client/apiutil.NewDiscoveryRESTMapper(c *rest.Config), or
+// k8s.io/client-go/restmapper.NewDiscoveryRESTMapper(groups []*restmapper.APIGroupResources)
+// in order to look up namespacing information from either a running API server, or statically, from
+// the list of restmapper.APIGroupResources.
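+//
+// A usage sketch (cfg being a *rest.Config):
+//
+//   mapper, err := apiutil.NewDiscoveryRESTMapper(cfg)
+//   if err != nil {
+//       return err
+//   }
+//   namespacer := RESTMapperToNamespacer(mapper)
+//   namespaced, err := namespacer.IsNamespaced(schema.GroupKind{Group: "apps", Kind: "Deployment"})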
+func RESTMapperToNamespacer(mapper SimpleRESTMapper) storage.Namespacer {
+ return &restNamespacer{
+ mapper: mapper,
+ mappingByType: make(map[schema.GroupKind]*meta.RESTMapping),
+ mu: &sync.RWMutex{},
+ }
+}
+
+var _ storage.Namespacer = &restNamespacer{}
+
+type restNamespacer struct {
+ mapper SimpleRESTMapper
+
+ mappingByType map[schema.GroupKind]*meta.RESTMapping
+ mu *sync.RWMutex
+}
+
+func (n *restNamespacer) IsNamespaced(gk schema.GroupKind) (bool, error) {
+ m, err := n.getMapping(gk)
+ if err != nil {
+ return false, err
+ }
+ return mappingNamespaced(m), nil
+}
+
+func (n *restNamespacer) getMapping(gk schema.GroupKind) (*meta.RESTMapping, error) {
+ n.mu.RLock()
+ mapping, ok := n.mappingByType[gk]
+ n.mu.RUnlock()
+ // If already cached, we're ok
+ if ok {
+ return mapping, nil
+ }
+
+ // Not cached; resolve the mapping and store it in the cache
+ n.mu.Lock()
+ defer n.mu.Unlock()
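+ // Note: the cache is not re-checked after taking the write lock, so two
+ // goroutines racing past the RLock above may both resolve the same mapping;
+ // they store equivalent results, so this is harmless, just slightly redundant.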
+ m, err := n.mapper.RESTMapping(gk)
+ if err != nil {
+ return nil, err
+ }
+ n.mappingByType[gk] = m
+ return m, nil
+}
+
+func mappingNamespaced(mapping *meta.RESTMapping) bool {
+ return mapping.Scope.Name() == meta.RESTScopeNameNamespace
+}
diff --git a/pkg/storage/mappedrawstorage.go b/pkg/storage/mappedrawstorage.go
deleted file mode 100644
index d41641ce..00000000
--- a/pkg/storage/mappedrawstorage.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package storage
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "sync"
-
- log "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/serializer"
- "github.com/weaveworks/libgitops/pkg/util"
-)
-
-var (
- // ErrNotTracked is returned when the requested resource wasn't found.
- ErrNotTracked = fmt.Errorf("untracked object: %w", ErrNotFound)
-)
-
-// MappedRawStorage is an interface for RawStorages which store their
-// data in a flat/unordered directory format like manifest directories.
-type MappedRawStorage interface {
- RawStorage
-
- // AddMapping binds a Key's virtual path to a physical file path
- AddMapping(key ObjectKey, path string)
- // RemoveMapping removes the physical file
- // path mapping matching the given Key
- RemoveMapping(key ObjectKey)
-
- // SetMappings overwrites all known mappings
- SetMappings(m map[ObjectKey]string)
-}
-
-func NewGenericMappedRawStorage(dir string) MappedRawStorage {
- return &GenericMappedRawStorage{
- dir: dir,
- fileMappings: make(map[ObjectKey]string),
- mux: &sync.Mutex{},
- }
-}
-
-// GenericMappedRawStorage is the default implementation of a MappedRawStorage,
-// it stores files in the given directory via a path translation map.
-type GenericMappedRawStorage struct {
- dir string
- fileMappings map[ObjectKey]string
- mux *sync.Mutex
-}
-
-func (r *GenericMappedRawStorage) realPath(key ObjectKey) (string, error) {
- r.mux.Lock()
- path, ok := r.fileMappings[key]
- r.mux.Unlock()
- if !ok {
- return "", fmt.Errorf("GenericMappedRawStorage: cannot resolve %q: %w", key, ErrNotTracked)
- }
-
- return path, nil
-}
-
-// If the file doesn't exist, returns ErrNotFound + ErrNotTracked.
-func (r *GenericMappedRawStorage) Read(key ObjectKey) ([]byte, error) {
- file, err := r.realPath(key)
- if err != nil {
- return nil, err
- }
-
- return ioutil.ReadFile(file)
-}
-
-func (r *GenericMappedRawStorage) Exists(key ObjectKey) bool {
- file, err := r.realPath(key)
- if err != nil {
- return false
- }
-
- return util.FileExists(file)
-}
-
-func (r *GenericMappedRawStorage) Write(key ObjectKey, content []byte) error {
- // GenericMappedRawStorage isn't going to generate files itself,
- // only write if the file is already known
- file, err := r.realPath(key)
- if err != nil {
- return err
- }
-
- return ioutil.WriteFile(file, content, 0644)
-}
-
-// If the file doesn't exist, returns ErrNotFound + ErrNotTracked.
-func (r *GenericMappedRawStorage) Delete(key ObjectKey) (err error) {
- file, err := r.realPath(key)
- if err != nil {
- return
- }
-
- // GenericMappedRawStorage files can be deleted
- // externally, check that the file exists first
- if util.FileExists(file) {
- err = os.Remove(file)
- }
-
- if err == nil {
- r.RemoveMapping(key)
- }
-
- return
-}
-
-func (r *GenericMappedRawStorage) List(kind KindKey) ([]ObjectKey, error) {
- result := make([]ObjectKey, 0)
-
- for key := range r.fileMappings {
- // Include objects with the same kind and group, ignore version mismatches
- if key.EqualsGVK(kind, false) {
- result = append(result, key)
- }
- }
-
- return result, nil
-}
-
-// This returns the modification time as a UnixNano string.
-// If the file doesn't exist, returns ErrNotFound + ErrNotTracked.
-func (r *GenericMappedRawStorage) Checksum(key ObjectKey) (string, error) {
- path, err := r.realPath(key)
- if err != nil {
- return "", err
- }
-
- return checksumFromModTime(path)
-}
-
-func (r *GenericMappedRawStorage) ContentType(key ObjectKey) (ct serializer.ContentType) {
- if file, err := r.realPath(key); err == nil {
- ct = ContentTypes[filepath.Ext(file)] // Retrieve the correct format based on the extension
- }
-
- return
-}
-
-func (r *GenericMappedRawStorage) WatchDir() string {
- return r.dir
-}
-
-func (r *GenericMappedRawStorage) GetKey(path string) (ObjectKey, error) {
- for key, p := range r.fileMappings {
- if p == path {
- return key, nil
- }
- }
-
- return objectKey{}, fmt.Errorf("no mapping found for path %q", path)
-}
-
-func (r *GenericMappedRawStorage) AddMapping(key ObjectKey, path string) {
- log.Debugf("GenericMappedRawStorage: AddMapping: %q -> %q", key, path)
- r.mux.Lock()
- r.fileMappings[key] = path
- r.mux.Unlock()
-}
-
-func (r *GenericMappedRawStorage) RemoveMapping(key ObjectKey) {
- log.Debugf("GenericMappedRawStorage: RemoveMapping: %q", key)
- r.mux.Lock()
- delete(r.fileMappings, key)
- r.mux.Unlock()
-}
-
-func (r *GenericMappedRawStorage) SetMappings(m map[ObjectKey]string) {
- log.Debugf("GenericMappedRawStorage: SetMappings: %v", m)
- r.mux.Lock()
- r.fileMappings = m
- r.mux.Unlock()
-}
diff --git a/pkg/storage/namespaces.go b/pkg/storage/namespaces.go
new file mode 100644
index 00000000..d6df9cd3
--- /dev/null
+++ b/pkg/storage/namespaces.go
@@ -0,0 +1,37 @@
+package storage
+
+import (
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+)
+
+// StaticNamespacer implements Namespacer
+var _ Namespacer = StaticNamespacer{}
+
+// StaticNamespacer applies a static default policy: objects are either namespaced
+// by default (NamespacedIsDefaultPolicy == true) or root-scoped by default
+// (NamespacedIsDefaultPolicy == false).
+//
+// Exceptions can be added to the default policy; for each listed GroupKind the
+// default policy is reversed.
+type StaticNamespacer struct {
+ NamespacedIsDefaultPolicy bool
+ Exceptions []core.GroupKind
+}
+
+func (n StaticNamespacer) IsNamespaced(gk core.GroupKind) (bool, error) {
+ if n.NamespacedIsDefaultPolicy {
+ // Namespaced by default; the Exceptions list contains the root-scoped GroupKinds
+ return !n.gkIsException(gk), nil
+ }
+ // Root-scoped by default; the Exceptions list contains the namespaced GroupKinds
+ return n.gkIsException(gk), nil
+}
+
+func (n StaticNamespacer) gkIsException(target core.GroupKind) bool {
+ for _, gk := range n.Exceptions {
+ if gk == target {
+ return true
+ }
+ }
+ return false
+}
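+
+// Illustrative example: a policy where every kind is namespaced except a few
+// cluster-scoped exceptions (the GroupKinds below are examples, not a complete set):
+//
+//   ns := StaticNamespacer{
+//       NamespacedIsDefaultPolicy: true,
+//       Exceptions: []core.GroupKind{
+//           {Group: "", Kind: "Namespace"},
+//           {Group: "", Kind: "Node"},
+//       },
+//   }
+//   namespaced, _ := ns.IsNamespaced(core.GroupKind{Group: "", Kind: "ConfigMap"}) // true
+//   namespaced, _ = ns.IsNamespaced(core.GroupKind{Group: "", Kind: "Node"})       // false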
diff --git a/pkg/storage/rawstorage.go b/pkg/storage/rawstorage.go
deleted file mode 100644
index 93304332..00000000
--- a/pkg/storage/rawstorage.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package storage
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path"
- "path/filepath"
- "strconv"
- "strings"
-
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/serializer"
- "github.com/weaveworks/libgitops/pkg/util"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// RawStorage is a Key-indexed low-level interface to
-// store byte-encoded Objects (resources) in non-volatile
-// memory.
-type RawStorage interface {
- // Read returns a resource's content based on key.
- // If the resource does not exist, it returns ErrNotFound.
- Read(key ObjectKey) ([]byte, error)
- // Exists checks if the resource indicated by key exists.
- Exists(key ObjectKey) bool
- // Write writes the given content to the resource indicated by key.
- // Error returns are implementation-specific.
- Write(key ObjectKey, content []byte) error
- // Delete deletes the resource indicated by key.
- // If the resource does not exist, it returns ErrNotFound.
- Delete(key ObjectKey) error
- // List returns all matching object keys based on the given KindKey.
- List(key KindKey) ([]ObjectKey, error)
- // Checksum returns a string checksum for the resource indicated by key.
- // If the resource does not exist, it returns ErrNotFound.
- Checksum(key ObjectKey) (string, error)
- // ContentType returns the content type of the contents of the resource indicated by key.
- ContentType(key ObjectKey) serializer.ContentType
-
- // WatchDir returns the path for Watchers to watch changes in.
- WatchDir() string
- // GetKey retrieves the Key containing the virtual path based
- // on the given physical file path returned by a Watcher.
- GetKey(path string) (ObjectKey, error)
-}
-
-func NewGenericRawStorage(dir string, gv schema.GroupVersion, ct serializer.ContentType) RawStorage {
- ext := extForContentType(ct)
- if ext == "" {
- panic("Invalid content type")
- }
- return &GenericRawStorage{
- dir: dir,
- gv: gv,
- ct: ct,
- ext: ext,
- }
-}
-
-// GenericRawStorage is a RawStorage which stores objects as files on disk,
-// in the form: <dir>/<kind>/<identifier>/metadata.<ext>.
-// The GenericRawStorage only supports one GroupVersion at a time, and will error
-// if given any other resources.
-type GenericRawStorage struct {
- dir string
- gv schema.GroupVersion
- ct serializer.ContentType
- ext string
-}
-
-func (r *GenericRawStorage) keyPath(key ObjectKey) string {
- return path.Join(r.dir, key.GetKind(), key.GetIdentifier(), fmt.Sprintf("metadata%s", r.ext))
-}
-
-func (r *GenericRawStorage) kindKeyPath(kindKey KindKey) string {
- return path.Join(r.dir, kindKey.GetKind())
-}
-
-func (r *GenericRawStorage) validateGroupVersion(kind KindKey) error {
- if r.gv.Group == kind.GetGroup() && r.gv.Version == kind.GetVersion() {
- return nil
- }
-
- return fmt.Errorf("GroupVersion %s/%s not supported by this GenericRawStorage", kind.GetGroup(), kind.GetVersion())
-}
-
-func (r *GenericRawStorage) Read(key ObjectKey) ([]byte, error) {
- // Validate GroupVersion first
- if err := r.validateGroupVersion(key); err != nil {
- return nil, err
- }
-
- // Check if the resource indicated by key exists
- if !r.Exists(key) {
- return nil, ErrNotFound
- }
-
- return ioutil.ReadFile(r.keyPath(key))
-}
-
-func (r *GenericRawStorage) Exists(key ObjectKey) bool {
- // Validate GroupVersion first
- if err := r.validateGroupVersion(key); err != nil {
- return false
- }
-
- return util.FileExists(r.keyPath(key))
-}
-
-func (r *GenericRawStorage) Write(key ObjectKey, content []byte) error {
- // Validate GroupVersion first
- if err := r.validateGroupVersion(key); err != nil {
- return err
- }
-
- file := r.keyPath(key)
-
- // Create the underlying directories if they do not exist already
- if !r.Exists(key) {
- if err := os.MkdirAll(path.Dir(file), 0755); err != nil {
- return err
- }
- }
-
- return ioutil.WriteFile(file, content, 0644)
-}
-
-func (r *GenericRawStorage) Delete(key ObjectKey) error {
- // Validate GroupVersion first
- if err := r.validateGroupVersion(key); err != nil {
- return err
- }
-
- // Check if the resource indicated by key exists
- if !r.Exists(key) {
- return ErrNotFound
- }
-
- return os.RemoveAll(path.Dir(r.keyPath(key)))
-}
-
-func (r *GenericRawStorage) List(kind KindKey) ([]ObjectKey, error) {
- // Validate GroupVersion first
- if err := r.validateGroupVersion(kind); err != nil {
- return nil, err
- }
-
- entries, err := ioutil.ReadDir(r.kindKeyPath(kind))
- if err != nil {
- return nil, err
- }
-
- result := make([]ObjectKey, 0, len(entries))
- for _, entry := range entries {
- result = append(result, NewObjectKey(kind, runtime.NewIdentifier(entry.Name())))
- }
-
- return result, nil
-}
-
-// This returns the modification time as a UnixNano string
-// If the file doesn't exist, return ErrNotFound
-func (r *GenericRawStorage) Checksum(key ObjectKey) (string, error) {
- // Validate GroupVersion first
- if err := r.validateGroupVersion(key); err != nil {
- return "", err
- }
-
- // Check if the resource indicated by key exists
- if !r.Exists(key) {
- return "", ErrNotFound
- }
-
- return checksumFromModTime(r.keyPath(key))
-}
-
-func (r *GenericRawStorage) ContentType(_ ObjectKey) serializer.ContentType {
- return r.ct
-}
-
-func (r *GenericRawStorage) WatchDir() string {
- return r.dir
-}
-
-func (r *GenericRawStorage) GetKey(p string) (ObjectKey, error) {
- splitDir := strings.Split(filepath.Clean(r.dir), string(os.PathSeparator))
- splitPath := strings.Split(filepath.Clean(p), string(os.PathSeparator))
-
- if len(splitPath) < len(splitDir)+2 {
- return nil, fmt.Errorf("path not long enough: %s", p)
- }
-
- for i := 0; i < len(splitDir); i++ {
- if splitDir[i] != splitPath[i] {
- return nil, fmt.Errorf("path has wrong base: %s", p)
- }
- }
- kind := splitPath[len(splitDir)]
- uid := splitPath[len(splitDir)+1]
- gvk := schema.GroupVersionKind{
- Group: r.gv.Group,
- Version: r.gv.Version,
- Kind: kind,
- }
-
- return NewObjectKey(NewKindKey(gvk), runtime.NewIdentifier(uid)), nil
-}
-
-func checksumFromModTime(path string) (string, error) {
- fi, err := os.Stat(path)
- if err != nil {
- return "", err
- }
-
- return strconv.FormatInt(fi.ModTime().UnixNano(), 10), nil
-}
diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go
deleted file mode 100644
index 4d942324..00000000
--- a/pkg/storage/storage.go
+++ /dev/null
@@ -1,454 +0,0 @@
-package storage
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
-
- "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/filter"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/serializer"
- patchutil "github.com/weaveworks/libgitops/pkg/util/patch"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- kruntime "k8s.io/apimachinery/pkg/runtime"
- "k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-var (
- // ErrAmbiguousFind is returned when the user requested one object from a List+Filter process.
- ErrAmbiguousFind = errors.New("two or more results were acquired when one was expected")
- // ErrNotFound is returned when the requested resource wasn't found.
- ErrNotFound = errors.New("resource not found")
- // ErrAlreadyExists is returned when WriteStorage.Create is called for an already stored object.
- ErrAlreadyExists = errors.New("resource already exists")
-)
-
-type ReadStorage interface {
- // Get returns a new Object for the resource at the specified kind/uid path, based on the file content.
- // If the resource referred to by the given ObjectKey does not exist, Get returns ErrNotFound.
- Get(key ObjectKey) (runtime.Object, error)
-
- // List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package
- // for more information, e.g. filter.NameFilter{} and filter.UIDFilter{})
- List(kind KindKey, opts ...filter.ListOption) ([]runtime.Object, error)
-
- // Find does a List underneath, also using filters, but always returns one object. If the List
- // underneath returned two or more results, ErrAmbiguousFind is returned. If no match was found,
- // ErrNotFound is returned.
- Find(kind KindKey, opts ...filter.ListOption) (runtime.Object, error)
-
- //
- // Partial object getters.
- // TODO: Figure out what we should do with these, do we need them and if so where?
- //
-
- // GetMeta returns a new Object's APIType representation for the resource at the specified kind/uid path.
- // If the resource referred to by the given ObjectKey does not exist, GetMeta returns ErrNotFound.
- GetMeta(key ObjectKey) (runtime.PartialObject, error)
- // ListMeta lists all Objects' APIType representation. In other words,
- // only metadata about each Object is unmarshalled (uid/name/kind/apiVersion).
- // This allows for faster runs (no need to unmarshal "the world"), and less
- // resource usage, when only metadata is unmarshalled into memory
- ListMeta(kind KindKey) ([]runtime.PartialObject, error)
-
- //
- // Cache-related methods.
- //
-
- // Checksum returns a string representing the state of an Object on disk
- // The checksum should change if any modifications have been made to the
- // Object on disk, it can be e.g. the Object's modification timestamp or
- // calculated checksum. If the Object is not found, ErrNotFound is returned.
- Checksum(key ObjectKey) (string, error)
- // Count returns the amount of available Objects of a specific kind
- // This is used by Caches to check if all Objects are cached to perform a List
- Count(kind KindKey) (uint64, error)
-
- //
- // Access to underlying Resources.
- //
-
- // RawStorage returns the RawStorage instance backing this Storage
- RawStorage() RawStorage
- // Serializer returns the serializer
- Serializer() serializer.Serializer
-
- //
- // Misc methods.
- //
-
- // ObjectKeyFor returns the ObjectKey for the given object
- ObjectKeyFor(obj runtime.Object) (ObjectKey, error)
- // Close closes all underlying resources (e.g. goroutines); call it before the application exits
- Close() error
-}
-
-type WriteStorage interface {
- // Create creates an entry for and stores the given Object in the storage. The Object must be new to the storage.
- // The ObjectMeta.CreationTimestamp field is set automatically to the current time if it is unset.
- Create(obj runtime.Object) error
- // Update updates the state of the given Object in the storage. The Object must exist in the storage.
- // The ObjectMeta.CreationTimestamp field is set automatically to the current time if it is unset.
- Update(obj runtime.Object) error
-
- // Patch performs a strategic merge patch on the Object with the given UID, using the byte-encoded patch given
- Patch(key ObjectKey, patch []byte) error
- // Delete removes an Object from the storage
- Delete(key ObjectKey) error
-}
-
-// Storage is an interface for persisting and retrieving API objects to/from a backend
-// One Storage instance handles all different Kinds of Objects
-type Storage interface {
- ReadStorage
- WriteStorage
-}
-
-// NewGenericStorage constructs a new Storage
-func NewGenericStorage(rawStorage RawStorage, serializer serializer.Serializer, identifiers []runtime.IdentifierFactory) Storage {
- return &GenericStorage{rawStorage, serializer, patchutil.NewPatcher(serializer), identifiers}
-}
-
-// GenericStorage implements the Storage interface
-type GenericStorage struct {
- raw RawStorage
- serializer serializer.Serializer
- patcher patchutil.Patcher
- identifiers []runtime.IdentifierFactory
-}
-
-var _ Storage = &GenericStorage{}
-
-func (s *GenericStorage) Serializer() serializer.Serializer {
- return s.serializer
-}
-
-// Get returns a new Object for the resource at the specified kind/uid path, based on the file content
-func (s *GenericStorage) Get(key ObjectKey) (runtime.Object, error) {
- content, err := s.raw.Read(key)
- if err != nil {
- return nil, err
- }
-
- return s.decode(key, content)
-}
-
-// TODO: Verify this works
-// GetMeta returns a new Object's APIType representation for the resource at the specified kind/uid path
-func (s *GenericStorage) GetMeta(key ObjectKey) (runtime.PartialObject, error) {
- content, err := s.raw.Read(key)
- if err != nil {
- return nil, err
- }
-
- return s.decodeMeta(key, content)
-}
-
-// TODO: Make sure we don't save a partial object
-func (s *GenericStorage) write(key ObjectKey, obj runtime.Object) error {
- // Set the content type based on the format given by the RawStorage, but default to JSON
- contentType := serializer.ContentTypeJSON
- if ct := s.raw.ContentType(key); len(ct) != 0 {
- contentType = ct
- }
-
- // Set creationTimestamp if not already populated
- t := obj.GetCreationTimestamp()
- if t.IsZero() {
- obj.SetCreationTimestamp(metav1.Now())
- }
-
- var objBytes bytes.Buffer
- err := s.serializer.Encoder().Encode(serializer.NewFrameWriter(contentType, &objBytes), obj)
- if err != nil {
- return err
- }
-
- return s.raw.Write(key, objBytes.Bytes())
-}
-
-func (s *GenericStorage) Create(obj runtime.Object) error {
- key, err := s.ObjectKeyFor(obj)
- if err != nil {
- return err
- }
-
- if s.raw.Exists(key) {
- return ErrAlreadyExists
- }
-
- // The object was not found so we can safely create it
- return s.write(key, obj)
-}
-
-func (s *GenericStorage) Update(obj runtime.Object) error {
- key, err := s.ObjectKeyFor(obj)
- if err != nil {
- return err
- }
-
- if !s.raw.Exists(key) {
- return ErrNotFound
- }
-
- // The object was found so we can safely update it
- return s.write(key, obj)
-}
-
-// Patch performs a strategic merge patch on the object with the given UID, using the byte-encoded patch given
-func (s *GenericStorage) Patch(key ObjectKey, patch []byte) error {
- oldContent, err := s.raw.Read(key)
- if err != nil {
- return err
- }
-
- newContent, err := s.patcher.Apply(oldContent, patch, key.GetGVK())
- if err != nil {
- return err
- }
-
- return s.raw.Write(key, newContent)
-}
-
-// Delete removes an Object from the storage
-func (s *GenericStorage) Delete(key ObjectKey) error {
- return s.raw.Delete(key)
-}
-
-// Checksum returns a string representing the state of an Object on disk
-func (s *GenericStorage) Checksum(key ObjectKey) (string, error) {
- return s.raw.Checksum(key)
-}
-
-func (s *GenericStorage) list(kind KindKey) (result []runtime.Object, walkerr error) {
- walkerr = s.walkKind(kind, func(key ObjectKey, content []byte) error {
- obj, err := s.decode(key, content)
- if err != nil {
- return err
- }
-
- result = append(result, obj)
- return nil
- })
- return
-}
-
-// List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package
-// for more information, e.g. filter.NameFilter{} and filter.UIDFilter{})
-func (s *GenericStorage) List(kind KindKey, opts ...filter.ListOption) ([]runtime.Object, error) {
- // First, complete the options struct
- o, err := filter.MakeListOptions(opts...)
- if err != nil {
- return nil, err
- }
-
- // Do an internal list to get all objects
- objs, err := s.list(kind)
- if err != nil {
- return nil, err
- }
-
- // For all list filters, pipe the output of the previous as the input to the next, in order.
- for _, filter := range o.Filters {
- objs, err = filter.Filter(objs...)
- if err != nil {
- return nil, err
- }
- }
- return objs, nil
-}
-
-// Find does a List underneath, also using filters, but always returns one object. If the List
-// underneath returned two or more results, ErrAmbiguousFind is returned. If no match was found,
-// ErrNotFound is returned.
-func (s *GenericStorage) Find(kind KindKey, opts ...filter.ListOption) (runtime.Object, error) {
- // Do a normal list underneath
- objs, err := s.List(kind, opts...)
- if err != nil {
- return nil, err
- }
- // Return based on the object count
- switch l := len(objs); l {
- case 0:
- return nil, fmt.Errorf("no Find match found: %w", ErrNotFound)
- case 1:
- return objs[0], nil
- default:
- return nil, fmt.Errorf("too many (%d) matches: %v: %w", l, objs, ErrAmbiguousFind)
- }
-}
-
-// ListMeta lists all Objects' APIType representation. In other words,
-// only metadata about each Object is unmarshalled (uid/name/kind/apiVersion).
-// This allows for faster runs (no need to unmarshal "the world"), and less
-// resource usage, when only metadata is unmarshalled into memory
-func (s *GenericStorage) ListMeta(kind KindKey) (result []runtime.PartialObject, walkerr error) {
- walkerr = s.walkKind(kind, func(key ObjectKey, content []byte) error {
-
- obj, err := s.decodeMeta(key, content)
- if err != nil {
- return err
- }
-
- result = append(result, obj)
- return nil
- })
- return
-}
-
-// Count counts the Objects for the specific kind
-func (s *GenericStorage) Count(kind KindKey) (uint64, error) {
- entries, err := s.raw.List(kind)
- return uint64(len(entries)), err
-}
-
-func (s *GenericStorage) ObjectKeyFor(obj runtime.Object) (ObjectKey, error) {
- var gvk schema.GroupVersionKind
- var err error
-
- _, isPartialObject := obj.(runtime.PartialObject)
- if isPartialObject {
- gvk = obj.GetObjectKind().GroupVersionKind()
- // TODO: Error if empty
- } else {
- gvk, err = serializer.GVKForObject(s.serializer.Scheme(), obj)
- if err != nil {
- return nil, err
- }
- }
-
- id := s.identify(obj)
- if id == nil {
- return nil, fmt.Errorf("couldn't identify object")
- }
- return NewObjectKey(NewKindKey(gvk), id), nil
-}
-
-// RawStorage returns the RawStorage instance backing this Storage
-func (s *GenericStorage) RawStorage() RawStorage {
- return s.raw
-}
-
-// Close closes all underlying resources (e.g. goroutines); call it before the application exits
-func (s *GenericStorage) Close() error {
- return nil // nothing to do here for GenericStorage
-}
-
-// identify loops through the identifiers, in priority order, to identify the object correctly
-func (s *GenericStorage) identify(obj runtime.Object) runtime.Identifyable {
- for _, identifier := range s.identifiers {
-
- id, ok := identifier.Identify(obj)
- if ok {
- return id
- }
- }
- return nil
-}
-
-func (s *GenericStorage) decode(key ObjectKey, content []byte) (runtime.Object, error) {
- gvk := key.GetGVK()
- // Decode the bytes to the internal version of the Object, if desired
- isInternal := gvk.Version == kruntime.APIVersionInternal
-
- // Decode the bytes into an Object
- ct := s.raw.ContentType(key)
- logrus.Infof("Decoding with content type %s", ct)
- obj, err := s.serializer.Decoder(
- serializer.WithConvertToHubDecode(isInternal),
- ).Decode(serializer.NewFrameReader(ct, serializer.FromBytes(content)))
- if err != nil {
- return nil, err
- }
-
- // Cast to runtime.Object, and make sure it works
- metaObj, ok := obj.(runtime.Object)
- if !ok {
- return nil, fmt.Errorf("can't convert to libgitops.runtime.Object")
- }
-
- // Set the desired gvk of this Object from the caller
- metaObj.GetObjectKind().SetGroupVersionKind(gvk)
- return metaObj, nil
-}
-
-func (s *GenericStorage) decodeMeta(key ObjectKey, content []byte) (runtime.PartialObject, error) {
- gvk := key.GetGVK()
- partobjs, err := DecodePartialObjects(serializer.FromBytes(content), s.serializer.Scheme(), false, &gvk)
- if err != nil {
- return nil, err
- }
-
- return partobjs[0], nil
-}
-
-func (s *GenericStorage) walkKind(kind KindKey, fn func(key ObjectKey, content []byte) error) error {
- keys, err := s.raw.List(kind)
- if err != nil {
- return err
- }
-
- for _, key := range keys {
- // Allow metadata.json to not exist, although the directory does exist
- if !s.raw.Exists(key) {
- continue
- }
-
- content, err := s.raw.Read(key)
- if err != nil {
- return err
- }
-
- if err := fn(key, content); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-// DecodePartialObjects reads any set of frames from the given ReadCloser, decodes the frames into
-// PartialObjects, validates that the decoded objects are known to the scheme, and optionally sets a default
-// group
-func DecodePartialObjects(rc io.ReadCloser, scheme *kruntime.Scheme, allowMultiple bool, defaultGVK *schema.GroupVersionKind) ([]runtime.PartialObject, error) {
- fr := serializer.NewYAMLFrameReader(rc)
-
- frames, err := serializer.ReadFrameList(fr)
- if err != nil {
- return nil, err
- }
-
- // If we only allow one frame, signal that early
- if !allowMultiple && len(frames) != 1 {
- return nil, fmt.Errorf("DecodePartialObjects: unexpected number of frames received from ReadCloser: %d expected 1", len(frames))
- }
-
- objs := make([]runtime.PartialObject, 0, len(frames))
- for _, frame := range frames {
- partobj, err := runtime.NewPartialObject(frame)
- if err != nil {
- return nil, err
- }
-
- gvk := partobj.GetObjectKind().GroupVersionKind()
-
- // Don't decode API objects unknown to the scheme (e.g. Kubernetes manifests)
- if !scheme.Recognizes(gvk) {
- // TODO: Typed error
- return nil, fmt.Errorf("unknown GroupVersionKind: %s", partobj.GetObjectKind().GroupVersionKind())
- }
-
- if defaultGVK != nil {
- // Set the desired gvk from the caller of this Object, if defaultGVK is set
- // In practice, this means that although we got an external type,
- // we might want internal Objects later in the client. Hence,
- // set the right expectation here
- partobj.GetObjectKind().SetGroupVersionKind(gvk)
- }
-
- objs = append(objs, partobj)
- }
- return objs, nil
-}
diff --git a/pkg/storage/sync/storage.go b/pkg/storage/sync/storage.go
deleted file mode 100644
index 458f7fa2..00000000
--- a/pkg/storage/sync/storage.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package sync
-
-/*
-
-TODO: Revisit if we need this file/package in the future.
-
-import (
- "fmt"
-
- log "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/storage"
- "github.com/weaveworks/libgitops/pkg/storage/watch"
- "github.com/weaveworks/libgitops/pkg/storage/watch/update"
- "github.com/weaveworks/libgitops/pkg/util/sync"
-)
-
-const updateBuffer = 4096 // How many updates to buffer, 4096 should be enough for even a high update frequency
-
-// SyncStorage is a Storage implementation taking in multiple Storages and
-// keeping them in sync. Any write operation executed on the SyncStorage
-// is propagated to all of the Storages it manages (including the embedded
-// one). For any retrieval or generation operation, the embedded Storage
-// will be used (it is treated as read-write). As all other Storages only
-// receive write operations, they can be thought of as write-only.
-type SyncStorage struct {
- storage.Storage
- storages []storage.Storage
- inboundStream update.UpdateStream
- outboundStream update.UpdateStream
- monitor *sync.Monitor
-}
-
-// SyncStorage implements update.EventStorage.
-var _ update.EventStorage = &SyncStorage{}
-
-// NewSyncStorage constructs a new SyncStorage
-func NewSyncStorage(rwStorage storage.Storage, wStorages ...storage.Storage) storage.Storage {
- ss := &SyncStorage{
- Storage: rwStorage,
- storages: append(wStorages, rwStorage),
- }
-
- for _, s := range ss.storages {
- if watchStorage, ok := s.(watch.WatchStorage); ok {
- // Populate eventStream if we found a watchstorage
- if ss.inboundStream == nil {
- ss.inboundStream = make(update.UpdateStream, updateBuffer)
- }
- watchStorage.SetUpdateStream(ss.inboundStream)
- }
- }
-
- if ss.inboundStream != nil {
- ss.monitor = sync.RunMonitor(ss.monitorFunc)
- ss.outboundStream = make(update.UpdateStream, updateBuffer)
- }
-
- return ss
-}
-
-// Set is propagated to all Storages
-func (ss *SyncStorage) Set(obj runtime.Object) error {
- return ss.runAll(func(s storage.Storage) error {
- return s.Set(obj)
- })
-}
-
-// Patch is propagated to all Storages
-func (ss *SyncStorage) Patch(key storage.ObjectKey, patch []byte) error {
- return ss.runAll(func(s storage.Storage) error {
- return s.Patch(key, patch)
- })
-}
-
-// Delete is propagated to all Storages
-func (ss *SyncStorage) Delete(key storage.ObjectKey) error {
- return ss.runAll(func(s storage.Storage) error {
- return s.Delete(key)
- })
-}
-
-func (ss *SyncStorage) Close() error {
- // Close all WatchStorages
- for _, s := range ss.storages {
- if watchStorage, ok := s.(watch.WatchStorage); ok {
- _ = watchStorage.Close()
- }
- }
-
- // Close the event streams if set
- if ss.inboundStream != nil {
- close(ss.inboundStream)
- }
- if ss.outboundStream != nil {
- close(ss.outboundStream)
- }
- // Wait for the monitor goroutine
- ss.monitor.Wait()
- return nil
-}
-
-func (ss *SyncStorage) GetUpdateStream() update.UpdateStream {
- return ss.outboundStream
-}
-
-// runAll runs the given function for all Storages in parallel and aggregates all errors
-func (ss *SyncStorage) runAll(f func(storage.Storage) error) (err error) {
- type result struct {
- int
- error
- }
-
- errC := make(chan result)
- for i, s := range ss.storages {
- go func(i int, s storage.Storage) {
- errC <- result{i, f(s)}
- }(i, s) // NOTE: This requires i and s as arguments, otherwise they will be evaluated for one Storage only
- }
-
- for i := 0; i < len(ss.storages); i++ {
- if result := <-errC; result.error != nil {
- if err == nil {
- err = fmt.Errorf("SyncStorage: Error in Storage %d: %v", result.int, result.error)
- } else {
- err = fmt.Errorf("%v\n%29s %d: %v", err, "and error in Storage", result.int, result.error)
- }
- }
- }
-
- return
-}
-
-func (ss *SyncStorage) monitorFunc() {
- log.Debug("SyncStorage: Monitoring thread started")
- defer log.Debug("SyncStorage: Monitoring thread stopped")
-
- // TODO: Support detecting changes done when the GitOps daemon isn't running
- // This is difficult to do though, as we don't know which state is the latest
- // For now, only update the state on write when the daemon is running
- for {
- upd, ok := <-ss.inboundStream
- if ok {
- log.Debugf("SyncStorage: Received update %v %t", upd, ok)
-
- gvk := upd.PartialObject.GetObjectKind().GroupVersionKind()
- uid := upd.PartialObject.GetUID()
- key := storage.NewObjectKey(storage.NewKindKey(gvk), runtime.NewIdentifier(string(uid)))
- log.Debugf("SyncStorage: Object has gvk=%q and uid=%q", gvk, uid)
-
- switch upd.Event {
- case update.ObjectEventModify, update.ObjectEventCreate:
- // First load the Object using the Storage given in the update,
- // then set it using the client constructed above
-
- obj, err := upd.Storage.Get(key)
- if err != nil {
- log.Errorf("Failed to get Object with UID %q: %v", upd.PartialObject.GetUID(), err)
- continue
- }
-
- if err = ss.Set(obj); err != nil {
- log.Errorf("Failed to set Object with UID %q: %v", upd.PartialObject.GetUID(), err)
- continue
- }
- case update.ObjectEventDelete:
- // For deletion we use the generated "fake" APIType object
- if err := ss.Delete(key); err != nil {
- log.Errorf("Failed to delete Object with UID %q: %v", upd.PartialObject.GetUID(), err)
- continue
- }
- }
-
- // Send the update to the listeners unless the channel is full,
- // in which case issue a warning. The channel can hold as many
- // updates as updateBuffer specifies.
- select {
- case ss.outboundStream <- upd:
- log.Debugf("SyncStorage: Sent update: %v", upd)
- default:
- log.Warn("SyncStorage: Failed to send update, channel full")
- }
- } else {
- return
- }
- }
-}
-*/
diff --git a/pkg/storage/transaction/commit.go b/pkg/storage/transaction/commit.go
deleted file mode 100644
index 30e55ae0..00000000
--- a/pkg/storage/transaction/commit.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package transaction
-
-import (
- "fmt"
-
- "github.com/fluxcd/go-git-providers/validation"
-)
-
-// CommitResult describes a result of a transaction.
-type CommitResult interface {
- // GetAuthorName describes the author's name (as per git config)
- // +required
- GetAuthorName() string
- // GetAuthorEmail describes the author's email (as per git config)
- // +required
- GetAuthorEmail() string
- // GetTitle describes the change concisely, so it can be used as a commit message or PR title.
- // +required
- GetTitle() string
- // GetDescription contains optional extra information about the change.
- // +optional
- GetDescription() string
-
- // GetMessage returns GetTitle() followed by a newline and GetDescription(), if set.
- GetMessage() string
- // Validate validates that all required fields are set, and given data is valid.
- Validate() error
-}
-
-// GenericCommitResult implements CommitResult.
-var _ CommitResult = &GenericCommitResult{}
-
-// GenericCommitResult implements CommitResult.
-type GenericCommitResult struct {
- // AuthorName describes the author's name (as per git config)
- // +required
- AuthorName string
- // AuthorEmail describes the author's email (as per git config)
- // +required
- AuthorEmail string
- // Title describes the change concisely, so it can be used as a commit message or PR title.
- // +required
- Title string
- // Description contains optional extra information about the change.
- // +optional
- Description string
-}
-
-func (r *GenericCommitResult) GetAuthorName() string {
- return r.AuthorName
-}
-func (r *GenericCommitResult) GetAuthorEmail() string {
- return r.AuthorEmail
-}
-func (r *GenericCommitResult) GetTitle() string {
- return r.Title
-}
-func (r *GenericCommitResult) GetDescription() string {
- return r.Description
-}
-func (r *GenericCommitResult) GetMessage() string {
- if len(r.Description) == 0 {
- return r.Title
- }
- return fmt.Sprintf("%s\n%s", r.Title, r.Description)
-}
-func (r *GenericCommitResult) Validate() error {
- v := validation.New("GenericCommitResult")
- if len(r.AuthorName) == 0 {
- v.Required("AuthorName")
- }
- if len(r.AuthorEmail) == 0 {
- v.Required("AuthorEmail")
- }
- if len(r.Title) == 0 {
- v.Required("Title")
- }
- return v.Error()
-}
diff --git a/pkg/storage/transaction/git.go b/pkg/storage/transaction/git.go
deleted file mode 100644
index efc57ab3..00000000
--- a/pkg/storage/transaction/git.go
+++ /dev/null
@@ -1,161 +0,0 @@
-package transaction
-
-import (
- "context"
- "fmt"
- "strings"
-
- "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/gitdir"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/serializer"
- "github.com/weaveworks/libgitops/pkg/storage"
- "github.com/weaveworks/libgitops/pkg/util"
- "github.com/weaveworks/libgitops/pkg/util/watcher"
-)
-
-var excludeDirs = []string{".git"}
-
-func NewGitStorage(gitDir gitdir.GitDirectory, prProvider PullRequestProvider, ser serializer.Serializer) (TransactionStorage, error) {
- // Make sure the repo is cloned. If this func has already been called, it will be a no-op.
- if err := gitDir.StartCheckoutLoop(); err != nil {
- return nil, err
- }
-
- raw := storage.NewGenericMappedRawStorage(gitDir.Dir())
- s := storage.NewGenericStorage(raw, ser, []runtime.IdentifierFactory{runtime.Metav1NameIdentifier})
-
- gitStorage := &GitStorage{
- ReadStorage: s,
- s: s,
- raw: raw,
- gitDir: gitDir,
- prProvider: prProvider,
- }
- // Do a first sync now, and then start the background loop
- if err := gitStorage.sync(); err != nil {
- return nil, err
- }
- gitStorage.syncLoop()
-
- return gitStorage, nil
-}
-
-type GitStorage struct {
- storage.ReadStorage
-
- s storage.Storage
- raw storage.MappedRawStorage
- gitDir gitdir.GitDirectory
- prProvider PullRequestProvider
-}
-
-func (s *GitStorage) syncLoop() {
- go func() {
- for {
- if commit, ok := <-s.gitDir.CommitChannel(); ok {
- logrus.Debugf("GitStorage: Got info about commit %q, syncing...", commit)
- if err := s.sync(); err != nil {
- logrus.Errorf("GitStorage: Got sync error: %v", err)
- }
- }
- }
- }()
-}
-
-func (s *GitStorage) sync() error {
- mappings, err := computeMappings(s.gitDir.Dir(), s.s)
- if err != nil {
- return err
- }
- logrus.Debugf("Rewriting the mappings to %v", mappings)
- s.raw.SetMappings(mappings)
- return nil
-}
-
-func (s *GitStorage) Transaction(ctx context.Context, streamName string, fn TransactionFunc) error {
- // Append random bytes to the end of the stream name if it ends with a dash
- if strings.HasSuffix(streamName, "-") {
- suffix, err := util.RandomSHA(4)
- if err != nil {
- return err
- }
- streamName += suffix
- }
-
- // Make sure we have the latest available state
- if err := s.gitDir.Pull(ctx); err != nil {
- return err
- }
- // Make sure no other Git ops can take place during the transaction, wait for other ongoing operations.
- s.gitDir.Suspend()
- defer s.gitDir.Resume()
- // Always switch back to the main branch afterwards.
- // TODO: Fix the ordering of the defers, and return the deferred error
- defer func() { _ = s.gitDir.CheckoutMainBranch() }()
-
- // Check out a new branch with the given name
- if err := s.gitDir.CheckoutNewBranch(streamName); err != nil {
- return err
- }
- // Invoke the transaction
- result, err := fn(ctx, s.s)
- if err != nil {
- return err
- }
- // Make sure the result is valid
- if err := result.Validate(); err != nil {
- return fmt.Errorf("transaction result is not valid: %w", err)
- }
- // Perform the commit
- if err := s.gitDir.Commit(ctx, result.GetAuthorName(), result.GetAuthorEmail(), result.GetMessage()); err != nil {
- return err
- }
- // Return if no PR should be made
- prResult, ok := result.(PullRequestResult)
- if !ok {
- return nil
- }
- // If a PR was asked for, and no provider was given, error out
- if s.prProvider == nil {
- return ErrNoPullRequestProvider
- }
- // Create the PR using the provider.
- return s.prProvider.CreatePullRequest(ctx, &GenericPullRequestSpec{
- PullRequestResult: prResult,
- MainBranch: s.gitDir.MainBranch(),
- MergeBranch: streamName,
- RepositoryRef: s.gitDir.RepositoryRef(),
- })
-}
-
-func computeMappings(dir string, s storage.Storage) (map[storage.ObjectKey]string, error) {
- validExts := make([]string, 0, len(storage.ContentTypes))
- for ext := range storage.ContentTypes {
- validExts = append(validExts, ext)
- }
-
- files, err := watcher.WalkDirectoryForFiles(dir, validExts, excludeDirs)
- if err != nil {
- return nil, err
- }
-
- // TODO: Compute the difference between the earlier state, and implement EventStorage so the user
- // can automatically subscribe to changes of objects between versions.
- m := map[storage.ObjectKey]string{}
- for _, file := range files {
- partObjs, err := storage.DecodePartialObjects(serializer.FromFile(file), s.Serializer().Scheme(), false, nil)
- if err != nil {
- logrus.Errorf("couldn't decode %q into a partial object: %v", file, err)
- continue
- }
- key, err := s.ObjectKeyFor(partObjs[0])
- if err != nil {
- logrus.Errorf("couldn't get objectkey for partial object: %v", err)
- continue
- }
- logrus.Debugf("Adding mapping between %s and %q", key, file)
- m[key] = file
- }
- return m, nil
-}
diff --git a/pkg/storage/transaction/pullrequest.go b/pkg/storage/transaction/pullrequest.go
deleted file mode 100644
index bf0fcf23..00000000
--- a/pkg/storage/transaction/pullrequest.go
+++ /dev/null
@@ -1,130 +0,0 @@
-package transaction
-
-import (
- "context"
-
- "github.com/fluxcd/go-git-providers/gitprovider"
- "github.com/fluxcd/go-git-providers/validation"
-)
-
-// PullRequestResult can be returned from a TransactionFunc instead of a CommitResult, if
-// a PullRequest is desired to be created by the PullRequestProvider.
-type PullRequestResult interface {
- // PullRequestResult is a superset of CommitResult
- CommitResult
-
- // GetLabels specifies what labels should be applied on the PR.
- // +optional
- GetLabels() []string
- // GetAssignees specifies what user login names should be assigned to this PR.
- // Note: Only users with "pull" access or more can be assigned.
- // +optional
- GetAssignees() []string
- // GetMilestone specifies what milestone this should be attached to.
- // +optional
- GetMilestone() string
-}
-
-// GenericPullRequestResult implements PullRequestResult.
-var _ PullRequestResult = &GenericPullRequestResult{}
-
-// GenericPullRequestResult implements PullRequestResult.
-type GenericPullRequestResult struct {
- // GenericPullRequestResult is a superset of a CommitResult.
- CommitResult
-
- // Labels specifies what labels should be applied on the PR.
- // +optional
- Labels []string
- // Assignees specifies what user login names should be assigned to this PR.
- // Note: Only users with "pull" access or more can be assigned.
- // +optional
- Assignees []string
- // Milestone specifies what milestone this should be attached to.
- // +optional
- Milestone string
-}
-
-func (r *GenericPullRequestResult) GetLabels() []string {
- return r.Labels
-}
-func (r *GenericPullRequestResult) GetAssignees() []string {
- return r.Assignees
-}
-func (r *GenericPullRequestResult) GetMilestone() string {
- return r.Milestone
-}
-func (r *GenericPullRequestResult) Validate() error {
- v := validation.New("GenericPullRequestResult")
- // Just validate the "inner" object
- v.Append(r.CommitResult.Validate(), r.CommitResult, "CommitResult")
- return v.Error()
-}
-
-// PullRequestSpec is the messaging interface between the TransactionStorage, and the
-// PullRequestProvider. The PullRequestSpec contains all the needed information for creating
-// a Pull Request successfully.
-type PullRequestSpec interface {
- // PullRequestSpec is a superset of PullRequestResult.
- PullRequestResult
-
- // GetMainBranch returns the main branch of the repository.
- // +required
- GetMainBranch() string
- // GetMergeBranch returns the branch that is pending to be merged into main with this PR.
- // +required
- GetMergeBranch() string
- // GetRepositoryRef returns a reference to the repository this PR is created against.
- // +required
- GetRepositoryRef() gitprovider.RepositoryRef
-}
-
-// GenericPullRequestSpec implements PullRequestSpec.
-type GenericPullRequestSpec struct {
- // GenericPullRequestSpec is a superset of PullRequestResult.
- PullRequestResult
-
- // MainBranch returns the main branch of the repository.
- // +required
- MainBranch string
- // MergeBranch returns the branch that is pending to be merged into main with this PR.
- // +required
- MergeBranch string
- // RepositoryRef specifies the repository this PR is created against.
- // +required
- RepositoryRef gitprovider.RepositoryRef
-}
-
-func (r *GenericPullRequestSpec) GetMainBranch() string {
- return r.MainBranch
-}
-func (r *GenericPullRequestSpec) GetMergeBranch() string {
- return r.MergeBranch
-}
-func (r *GenericPullRequestSpec) GetRepositoryRef() gitprovider.RepositoryRef {
- return r.RepositoryRef
-}
-func (r *GenericPullRequestSpec) Validate() error {
- v := validation.New("GenericPullRequestSpec")
- // Just validate the "inner" object
- v.Append(r.PullRequestResult.Validate(), r.PullRequestResult, "PullRequestResult")
-
- if len(r.MainBranch) == 0 {
- v.Required("MainBranch")
- }
- if len(r.MergeBranch) == 0 {
- v.Required("MergeBranch")
- }
- if r.RepositoryRef == nil {
- v.Required("RepositoryRef")
- }
- return v.Error()
-}
-
-// PullRequestProvider is an interface for providers that can create so-called "Pull Requests",
-// as popularized by Git. A Pull Request is a formal ask for a branch to be merged into the main one.
-// It can be UI-based, as in GitHub and GitLab, or it can use some other method.
-type PullRequestProvider interface {
- // CreatePullRequest creates a Pull Request using the given specification.
- CreatePullRequest(ctx context.Context, spec PullRequestSpec) error
-}
diff --git a/pkg/storage/transaction/pullrequest/github/github.go b/pkg/storage/transaction/pullrequest/github/github.go
deleted file mode 100644
index d8efbd65..00000000
--- a/pkg/storage/transaction/pullrequest/github/github.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package github
-
-import (
- "context"
- "errors"
- "fmt"
-
- "github.com/fluxcd/go-git-providers/github"
- "github.com/fluxcd/go-git-providers/gitprovider"
- gogithub "github.com/google/go-github/v32/github"
- "github.com/weaveworks/libgitops/pkg/storage/transaction"
-)
-
-// TODO: This package should really only depend on go-git-providers' abstraction interface
-
-var ErrProviderNotSupported = errors.New("only the Github go-git-providers provider is supported at the moment")
-
-// NewGitHubPRProvider returns a new transaction.PullRequestProvider from a gitprovider.Client.
-func NewGitHubPRProvider(c gitprovider.Client) (transaction.PullRequestProvider, error) {
- // Make sure a Github client was passed
- if c.ProviderID() != github.ProviderID {
- return nil, ErrProviderNotSupported
- }
- return &prCreator{c}, nil
-}
-
-type prCreator struct {
- c gitprovider.Client
-}
-
-func (c *prCreator) CreatePullRequest(ctx context.Context, spec transaction.PullRequestSpec) error {
- // First, validate the input
- if err := spec.Validate(); err != nil {
- return fmt.Errorf("given PullRequestSpec wasn't valid")
- }
-
- // Use the "raw" go-github client to do this
- ghClient := c.c.Raw().(*gogithub.Client)
-
- // Helper variables
- owner := spec.GetRepositoryRef().GetIdentity()
- repo := spec.GetRepositoryRef().GetRepository()
- var body *string
- if spec.GetDescription() != "" {
- body = gogithub.String(spec.GetDescription())
- }
-
- // Create the Pull Request
- pr, _, err := ghClient.PullRequests.Create(ctx, owner, repo, &gogithub.NewPullRequest{
- Head: gogithub.String(spec.GetMergeBranch()),
- Base: gogithub.String(spec.GetMainBranch()),
- Title: gogithub.String(spec.GetTitle()),
- Body: body,
- })
- if err != nil {
- return err
- }
-
- // If spec.GetMilestone() is set, fetch the ID of the milestone
- // Only set milestoneID to non-nil if specified
- var milestoneID *int
- if len(spec.GetMilestone()) != 0 {
- milestoneID, err = getMilestoneID(ctx, ghClient, owner, repo, spec.GetMilestone())
- if err != nil {
- return err
- }
- }
-
- // Only set assignees to non-nil if specified
- var assignees *[]string
- if a := spec.GetAssignees(); len(a) != 0 {
- assignees = &a
- }
-
- // Only set labels to non-nil if specified
- var labels *[]string
- if l := spec.GetLabels(); len(l) != 0 {
- labels = &l
- }
-
- // Only PATCH the PR if any of the fields were set
- if milestoneID != nil || assignees != nil || labels != nil {
- _, _, err := ghClient.Issues.Edit(ctx, owner, repo, pr.GetNumber(), &gogithub.IssueRequest{
- Milestone: milestoneID,
- Assignees: assignees,
- Labels: labels,
- })
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func getMilestoneID(ctx context.Context, c *gogithub.Client, owner, repo, milestoneName string) (*int, error) {
- // List all milestones in the repo
- // TODO: This could/should use pagination
- milestones, _, err := c.Issues.ListMilestones(ctx, owner, repo, &gogithub.MilestoneListOptions{
- State: "all",
- })
- if err != nil {
- return nil, err
- }
- // Loop through all milestones, search for one with the right name
- for _, milestone := range milestones {
- // Only consider a milestone with the right name
- if milestone.GetTitle() != milestoneName {
- continue
- }
- // Validate nil to avoid panics
- if milestone.Number == nil {
- return nil, fmt.Errorf("didn't expect milestone Number to be nil: %v", milestone)
- }
- // Return the Milestone number
- return milestone.Number, nil
- }
- return nil, fmt.Errorf("couldn't find milestone with name: %s", milestoneName)
-}
diff --git a/pkg/storage/transaction/storage.go b/pkg/storage/transaction/storage.go
deleted file mode 100644
index 8a60e93b..00000000
--- a/pkg/storage/transaction/storage.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package transaction
-
-import (
- "context"
- "errors"
-
- "github.com/weaveworks/libgitops/pkg/storage"
-)
-
-var (
- ErrAbortTransaction = errors.New("transaction aborted")
- ErrTransactionActive = errors.New("transaction is active")
- ErrNoPullRequestProvider = errors.New("no pull request provider given")
-)
-
-type TransactionFunc func(ctx context.Context, s storage.Storage) (CommitResult, error)
-
-type TransactionStorage interface {
- storage.ReadStorage
-
- // Transaction creates a new "stream" (for Git: branch) with the given name, or
- // prefix if streamName ends with a dash (in that case, an 8-char hash will be appended).
- // The environment is made sure to be as up-to-date as possible before fn executes. When
- // fn executes, the given storage can be used to modify the desired state. If you want to
- // "commit" the changes made in fn, just return nil. If you want to abort, return ErrAbortTransaction.
- // If you want to create a pull request from the changes, return a PullRequestResult instead of a plain CommitResult.
- Transaction(ctx context.Context, streamName string, fn TransactionFunc) error
-}
diff --git a/pkg/storage/utils.go b/pkg/storage/utils.go
new file mode 100644
index 00000000..799f2dc6
--- /dev/null
+++ b/pkg/storage/utils.go
@@ -0,0 +1,23 @@
+package storage
+
+import (
+ "fmt"
+
+ "github.com/weaveworks/libgitops/pkg/storage/core"
+)
+
+// VerifyNamespaced verifies that the given GroupKind and namespace parameter
+// follow the rules of the Namespacer.
+func VerifyNamespaced(namespacer Namespacer, gk core.GroupKind, ns string) error {
+ // Get namespacing info
+ namespaced, err := namespacer.IsNamespaced(gk)
+ if err != nil {
+ return err
+ }
+ if namespaced && ns == "" {
+ return fmt.Errorf("%w: namespaced kind %v requires non-empty namespace", ErrNamespacedMismatch, gk)
+ } else if !namespaced && ns != "" {
+ return fmt.Errorf("%w: non-namespaced kind %v must not have namespace parameter set", ErrNamespacedMismatch, gk)
+ }
+ return nil
+}
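+
+// Illustrative example (reusing StaticNamespacer from this package):
+//
+//   ns := StaticNamespacer{NamespacedIsDefaultPolicy: true}
+//   // A namespaced kind with an empty namespace violates the rules:
+//   err := VerifyNamespaced(ns, core.GroupKind{Group: "", Kind: "ConfigMap"}, "")
+//   errors.Is(err, ErrNamespacedMismatch) // true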
diff --git a/pkg/storage/watch/storage.go b/pkg/storage/watch/storage.go
deleted file mode 100644
index f3d7b0bb..00000000
--- a/pkg/storage/watch/storage.go
+++ /dev/null
@@ -1,244 +0,0 @@
-package watch
-
-import (
- "io/ioutil"
-
- log "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/serializer"
- "github.com/weaveworks/libgitops/pkg/storage"
- "github.com/weaveworks/libgitops/pkg/storage/watch/update"
- "github.com/weaveworks/libgitops/pkg/util/sync"
- "github.com/weaveworks/libgitops/pkg/util/watcher"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/types"
-)
-
-// NewManifestStorage returns a pre-configured GenericWatchStorage backed by a storage.GenericStorage,
-// and a GenericMappedRawStorage for the given manifestDir and Serializer. This should be sufficient
-// for most users that want to watch changes in a directory with manifests.
-func NewManifestStorage(manifestDir string, ser serializer.Serializer) (update.EventStorage, error) {
- return NewGenericWatchStorage(
- storage.NewGenericStorage(
- storage.NewGenericMappedRawStorage(manifestDir),
- ser,
- []runtime.IdentifierFactory{runtime.Metav1NameIdentifier},
- ),
- )
-}
-
-// NewGenericWatchStorage is an extended Storage implementation, which provides a watcher
-// for watching changes in the directory managed by the embedded Storage's RawStorage.
-// If the RawStorage is a MappedRawStorage instance, its mappings will automatically
-// be updated by the WatchStorage. Update events are sent to the given event stream.
-// Note: This WatchStorage only works for one-frame files (i.e. only one YAML document
-// per file is supported).
-func NewGenericWatchStorage(s storage.Storage) (update.EventStorage, error) {
- ws := &GenericWatchStorage{
- Storage: s,
- }
-
- var err error
- var files []string
- if ws.watcher, files, err = watcher.NewFileWatcher(s.RawStorage().WatchDir()); err != nil {
- return nil, err
- }
-
- ws.monitor = sync.RunMonitor(func() {
- ws.monitorFunc(ws.RawStorage(), files) // Offload the file registration to the goroutine
- })
-
- return ws, nil
-}
-
-// EventDeleteObjectName is used as the name of an object sent to the
-// GenericWatchStorage's event stream when the object has been deleted
-const EventDeleteObjectName = ""
-
-// GenericWatchStorage implements the WatchStorage interface
-type GenericWatchStorage struct {
- storage.Storage
- watcher *watcher.FileWatcher
- events update.UpdateStream
- monitor *sync.Monitor
-}
-
-var _ update.EventStorage = &GenericWatchStorage{}
-
-// Suspend modify events during Create
-func (s *GenericWatchStorage) Create(obj runtime.Object) error {
- s.watcher.Suspend(watcher.FileEventModify)
- return s.Storage.Create(obj)
-}
-
-// Suspend modify events during Update
-func (s *GenericWatchStorage) Update(obj runtime.Object) error {
- s.watcher.Suspend(watcher.FileEventModify)
- return s.Storage.Update(obj)
-}
-
-// Suspend modify events during Patch
-func (s *GenericWatchStorage) Patch(key storage.ObjectKey, patch []byte) error {
- s.watcher.Suspend(watcher.FileEventModify)
- return s.Storage.Patch(key, patch)
-}
-
-// Suspend delete events during Delete
-func (s *GenericWatchStorage) Delete(key storage.ObjectKey) error {
- s.watcher.Suspend(watcher.FileEventDelete)
- return s.Storage.Delete(key)
-}
-
-func (s *GenericWatchStorage) SetUpdateStream(eventStream update.UpdateStream) {
- s.events = eventStream
-}
-
-func (s *GenericWatchStorage) Close() error {
- s.watcher.Close()
- s.monitor.Wait()
- return nil
-}
-
-func (s *GenericWatchStorage) monitorFunc(raw storage.RawStorage, files []string) {
- log.Debug("GenericWatchStorage: Monitoring thread started")
- defer log.Debug("GenericWatchStorage: Monitoring thread stopped")
- var content []byte
-
- // Send a MODIFY event for all files (and fill the mappings
- // of the MappedRawStorage) before starting to monitor changes
- for _, file := range files {
- content, err := ioutil.ReadFile(file)
- if err != nil {
- log.Warnf("Ignoring %q: %v", file, err)
- continue
- }
-
- obj, err := runtime.NewPartialObject(content)
- if err != nil {
- log.Warnf("Ignoring %q: %v", file, err)
- continue
- }
-
- // Add a mapping between this object and path
- s.addMapping(raw, obj, file)
- // Send the event to the events channel
- s.sendEvent(update.ObjectEventModify, obj)
- }
-
- for {
- if event, ok := <-s.watcher.GetFileUpdateStream(); ok {
- var partObj runtime.PartialObject
- var err error
-
- var objectEvent update.ObjectEvent
- switch event.Event {
- case watcher.FileEventModify:
- objectEvent = update.ObjectEventModify
- case watcher.FileEventDelete:
- objectEvent = update.ObjectEventDelete
- }
-
- log.Tracef("GenericWatchStorage: Processing event: %s", event.Event)
- if event.Event == watcher.FileEventDelete {
- key, err := raw.GetKey(event.Path)
- if err != nil {
- log.Warnf("Failed to retrieve data for %q: %v", event.Path, err)
- continue
- }
-
- // This creates a "fake" Object from the key to be used for
- // deletion, as the original has already been removed from disk
- apiVersion, kind := key.GetGVK().ToAPIVersionAndKind()
- partObj = &runtime.PartialObjectImpl{
- TypeMeta: metav1.TypeMeta{
- APIVersion: apiVersion,
- Kind: kind,
- },
- ObjectMeta: metav1.ObjectMeta{
- Name: EventDeleteObjectName,
- // TODO: This doesn't take into account where e.g. the identifier is "{namespace}/{name}"
- UID: types.UID(key.GetIdentifier()),
- },
- }
- // remove the mapping for this key as it's now deleted
- s.removeMapping(raw, key)
- } else {
- content, err = ioutil.ReadFile(event.Path)
- if err != nil {
- log.Warnf("Ignoring %q: %v", event.Path, err)
- continue
- }
-
- if partObj, err = runtime.NewPartialObject(content); err != nil {
- log.Warnf("Ignoring %q: %v", event.Path, err)
- continue
- }
-
- if event.Event == watcher.FileEventMove {
- // Update the mappings for the moved file (AddMapping overwrites)
- s.addMapping(raw, partObj, event.Path)
-
- // Internal move events are a no-op
- continue
- }
-
- // This is based on the key's existence instead of watcher.EventCreate,
- // as Objects can get updated (via watcher.FileEventModify) to be conformant
- if _, err = raw.GetKey(event.Path); err != nil {
- // Add a mapping between this object and path
- s.addMapping(raw, partObj, event.Path)
-
- // This is what actually determines if an Object is created,
- // so update the event to update.ObjectEventCreate here
- objectEvent = update.ObjectEventCreate
- }
- }
-
- // Send the objectEvent to the events channel
- if objectEvent != update.ObjectEventNone {
- s.sendEvent(objectEvent, partObj)
- }
- } else {
- return
- }
- }
-}
-
-func (s *GenericWatchStorage) sendEvent(event update.ObjectEvent, partObj runtime.PartialObject) {
- if s.events != nil {
- log.Tracef("GenericWatchStorage: Sending event: %v", event)
- s.events <- update.Update{
- Event: event,
- PartialObject: partObj,
- Storage: s,
- }
- }
-}
-
-// addMapping registers a mapping between the given object and the specified path, if raw is a
-// MappedRawStorage. If a given mapping already exists between this object and some path, it
-// will be overridden with the specified new path
-func (s *GenericWatchStorage) addMapping(raw storage.RawStorage, obj runtime.Object, file string) {
- mapped, ok := raw.(storage.MappedRawStorage)
- if !ok {
- return
- }
-
-// Let the embedded storage decide, using its identifiers, how to construct the key
- key, err := s.Storage.ObjectKeyFor(obj)
- if err != nil {
- log.Errorf("couldn't get object key for: gvk=%s, uid=%s, name=%s", obj.GetObjectKind().GroupVersionKind(), obj.GetUID(), obj.GetName())
- }
-
- mapped.AddMapping(key, file)
-}
-
-// removeMapping removes the mapping for a file that no longer exists
-func (s *GenericWatchStorage) removeMapping(raw storage.RawStorage, key storage.ObjectKey) {
- mapped, ok := raw.(storage.MappedRawStorage)
- if !ok {
- return
- }
-
- mapped.RemoveMapping(key)
-}
diff --git a/pkg/storage/watch/update/event.go b/pkg/storage/watch/update/event.go
deleted file mode 100644
index 57367b7d..00000000
--- a/pkg/storage/watch/update/event.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package update
-
-import "fmt"
-
-// ObjectEvent is an enum describing a change in an Object's state.
-type ObjectEvent byte
-
-var _ fmt.Stringer = ObjectEvent(0)
-
-const (
- ObjectEventNone ObjectEvent = iota // 0
- ObjectEventCreate // 1
- ObjectEventModify // 2
- ObjectEventDelete // 3
-)
-
-func (o ObjectEvent) String() string {
- switch o {
- case 0:
- return "NONE"
- case 1:
- return "CREATE"
- case 2:
- return "MODIFY"
- case 3:
- return "DELETE"
- }
-
- // Should never happen
- return "UNKNOWN"
-}
diff --git a/pkg/storage/watch/update/update.go b/pkg/storage/watch/update/update.go
deleted file mode 100644
index 05ea7e0e..00000000
--- a/pkg/storage/watch/update/update.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package update
-
-import (
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/storage"
-)
-
-// Update bundles a FileEvent with an
-// APIType for Storage retrieval.
-type Update struct {
- Event ObjectEvent
- PartialObject runtime.PartialObject
- Storage storage.Storage
-}
-
-// UpdateStream is a channel of updates.
-type UpdateStream chan Update
-
-// EventStorage is a storage that exposes an UpdateStream.
-type EventStorage interface {
- storage.Storage
-
- // SetUpdateStream gives the EventStorage a channel to send events to.
- // The caller is responsible for choosing a large enough buffer to avoid
- // blocking the underlying EventStorage implementation unnecessarily.
- // TODO: In the future maybe enable sending events to multiple listeners?
- SetUpdateStream(UpdateStream)
-}
diff --git a/pkg/tracing/logging.go b/pkg/tracing/logging.go
new file mode 100644
index 00000000..f4f8269d
--- /dev/null
+++ b/pkg/tracing/logging.go
@@ -0,0 +1,133 @@
+package tracing
+
+import (
+ "context"
+
+ "github.com/go-logr/logr"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/trace"
+ ctrllog "sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+func NewLoggingTracerProvider(tp trace.TracerProvider) SDKTracerProvider {
+ return &loggingTracerProvider{tp}
+}
+
+type loggingTracerProvider struct {
+ tp trace.TracerProvider
+}
+
+func (tp *loggingTracerProvider) Tracer(instrumentationName string, opts ...trace.TracerOption) trace.Tracer {
+ tracer := tp.tp.Tracer(instrumentationName, opts...)
+ return &loggingTracer{provider: tp, tracer: tracer, name: instrumentationName}
+}
+
+func (tp *loggingTracerProvider) Shutdown(ctx context.Context) error {
+ p, ok := tp.tp.(SDKTracerProvider)
+ if !ok {
+ return nil
+ }
+ return p.Shutdown(ctx)
+}
+
+func (tp *loggingTracerProvider) ForceFlush(ctx context.Context) error {
+ p, ok := tp.tp.(SDKTracerProvider)
+ if !ok {
+ return nil
+ }
+ return p.ForceFlush(ctx)
+}
+
+type loggingTracer struct {
+ provider trace.TracerProvider
+ tracer trace.Tracer
+ name string
+}
+
+func (t *loggingTracer) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+ log := ctrllog.FromContext(ctx).WithName(t.name) //.WithValues(spanNameKey, spanName)
+ spanCfg := trace.NewSpanStartConfig(opts...)
+ startLog := log
+ if len(spanCfg.Attributes()) != 0 {
+ startLog = startLog.WithValues(spanAttributesKey, spanCfg.Attributes())
+ }
+ startLog.Info("starting span")
+
+ ctx, span := t.tracer.Start(ctx, spanName, opts...)
+ logSpan := &loggingSpan{t.provider, log, span, spanName}
+ ctx = trace.ContextWithSpan(ctx, logSpan)
+ return ctx, logSpan
+}
+
+type loggingSpan struct {
+ provider trace.TracerProvider
+ log logr.Logger
+ span trace.Span
+ spanName string
+}
+
+const (
+ spanNameKey = "span-name"
+ spanEventKey = "span-event"
+ spanStatusCodeKey = "span-status-code"
+ spanStatusDescriptionKey = "span-status-description"
+ spanAttributesKey = "span-attributes"
+)
+
+func (s *loggingSpan) End(options ...trace.SpanEndOption) {
+ s.log.Info("ending span")
+ s.span.End(options...)
+}
+
+// AddEvent adds an event with the provided name and options.
+func (s *loggingSpan) AddEvent(name string, options ...trace.EventOption) {
+ s.log.Info("recorded span event", spanEventKey, name)
+ s.span.AddEvent(name, options...)
+}
+
+// IsRecording returns the recording state of the Span. It will return
+// true if the Span is active and events can be recorded.
+func (s *loggingSpan) IsRecording() bool { return s.span.IsRecording() }
+
+// RecordError will record err as an exception span event for this span. An
+// additional call to SetStatus is required if the Status of the Span should
+// be set to Error, as this method does not change the Span status. If this
+// span is not being recorded or err is nil then this method does nothing.
+func (s *loggingSpan) RecordError(err error, options ...trace.EventOption) {
+ s.log.Error(err, "recorded span error")
+ s.span.RecordError(err, options...)
+}
+
+// SpanContext returns the SpanContext of the Span. The returned SpanContext
+// is usable even after the End method has been called for the Span.
+func (s *loggingSpan) SpanContext() trace.SpanContext { return s.span.SpanContext() }
+
+// SetStatus sets the status of the Span in the form of a code and a
+// description, overriding previous values set. The description is only
+// included in a status when the code is for an error.
+func (s *loggingSpan) SetStatus(code codes.Code, description string) {
+ s.log.Info("recorded span status change",
+ spanStatusCodeKey, code.String(),
+ spanStatusDescriptionKey, description)
+ s.span.SetStatus(code, description)
+}
+
+// SetName sets the Span name.
+func (s *loggingSpan) SetName(name string) {
+ s.log.Info("recorded span name change", spanNameKey, name)
+ s.log = s.log.WithValues(spanNameKey, name)
+ s.span.SetName(name)
+}
+
+// SetAttributes sets kv as attributes of the Span. If a key from kv
+// already exists for an attribute of the Span it will be overwritten with
+// the value contained in kv.
+func (s *loggingSpan) SetAttributes(kv ...attribute.KeyValue) {
+ s.log.Info("recorded span attribute change", spanAttributesKey, kv)
+ s.span.SetAttributes(kv...)
+}
+
+// TracerProvider returns a TracerProvider that can be used to generate
+// additional Spans on the same telemetry pipeline as the current Span.
+func (s *loggingSpan) TracerProvider() trace.TracerProvider { return s.provider }
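// A minimal usage sketch (illustration, not part of this patch): wiring the logging
// TracerProvider around a plain OTel SDK provider. The zap-based logger and the
// import paths are assumptions based on this repository's dependencies.
package main

import (
	"context"

	"github.com/weaveworks/libgitops/pkg/tracing"
	tracesdk "go.opentelemetry.io/otel/sdk/trace"
	ctrllog "sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

func main() {
	// loggingTracer reads its logger from the context via ctrllog.FromContext,
	// so attach one before starting spans.
	ctx := ctrllog.IntoContext(context.Background(), zap.New())

	tp := tracing.NewLoggingTracerProvider(tracesdk.NewTracerProvider())
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_, span := tp.Tracer("example").Start(ctx, "do-work") // logs "starting span"
	span.AddEvent("step-1")                               // logs "recorded span event"
	span.End()                                            // logs "ending span"
}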
diff --git a/pkg/tracing/tracer_provider.go b/pkg/tracing/tracer_provider.go
new file mode 100644
index 00000000..622b6d30
--- /dev/null
+++ b/pkg/tracing/tracer_provider.go
@@ -0,0 +1,248 @@
+package tracing
+
+import (
+ "context"
+ "errors"
+ "time"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/exporters/jaeger"
+ "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+ "go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+ "go.opentelemetry.io/otel/sdk/resource"
+ tracesdk "go.opentelemetry.io/otel/sdk/trace"
+ semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/multierr"
+)
+
+type SDKTracerProvider interface {
+ trace.TracerProvider
+ Shutdown(ctx context.Context) error
+ ForceFlush(ctx context.Context) error
+}
+
+// NewBuilder returns a new TracerProviderBuilder instance.
+func NewBuilder() TracerProviderBuilder {
+ return &builder{}
+}
+
+// TracerProviderBuilder is a builder for a TracerProviderWithShutdown.
+type TracerProviderBuilder interface {
+ // RegisterInsecureOTelExporter registers an exporter to an OpenTelemetry Collector on the
+ // given address, which defaults to "localhost:55680" if addr is empty. The OpenTelemetry
+ // Collector speaks gRPC, so don't add any "http(s)://" prefix to addr. The OpenTelemetry
+ // Collector is just a proxy; it can in turn forward e.g. traces to Jaeger and metrics to
+ // Prometheus. Additional options can be supplied to override the default behavior.
+ RegisterInsecureOTelExporter(ctx context.Context, addr string, opts ...otlptracegrpc.Option) TracerProviderBuilder
+
+ // RegisterInsecureJaegerExporter registers an exporter to Jaeger using Jaeger's own HTTP API.
+ // The default address is "http://localhost:14268/api/traces" if addr is left empty.
+ // Additional options can be supplied that can override the default behavior.
+ RegisterInsecureJaegerExporter(addr string, opts ...jaeger.CollectorEndpointOption) TracerProviderBuilder
+
+ // RegisterStdoutExporter exports pretty-formatted telemetry data to os.Stdout, or another writer if
+ // stdouttrace.WithWriter(w) is supplied as an option. Note that stdouttrace.WithoutTimestamps() doesn't
+ // work due to an upstream bug in OpenTelemetry. TODO: Fix that issue upstream.
+ RegisterStdoutExporter(opts ...stdouttrace.Option) TracerProviderBuilder
+
+ // WithOptions allows configuring the TracerProvider in various ways, e.g. tracesdk.WithSpanProcessor(sp)
+ // or tracesdk.WithIDGenerator()
+ WithOptions(opts ...tracesdk.TracerProviderOption) TracerProviderBuilder
+
+ // WithAttributes allows registering more default attributes for traces created by this TracerProvider.
+ // By default semantic conventions of version v1.4.0 are used, with "service.name" => "libgitops".
+ WithAttributes(attrs ...attribute.KeyValue) TracerProviderBuilder
+
+ // WithSynchronousExports allows configuring whether the exporters should export in synchronous mode
+ // (which must be used ONLY for testing) or (by default) the batching mode.
+ WithSynchronousExports(sync bool) TracerProviderBuilder
+
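+ // WithLogging wraps the built TracerProvider using NewLoggingTracerProvider if log is true.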
+ WithLogging(log bool) TracerProviderBuilder
+
+ // Build builds the SDKTracerProvider.
+ Build() (SDKTracerProvider, error)
+
+ // InstallGlobally builds the TracerProvider and registers it globally using otel.SetTracerProvider(tp).
+ InstallGlobally() error
+}
+
+type builder struct {
+ exporters []tracesdk.SpanExporter
+ errs []error
+ tpOpts []tracesdk.TracerProviderOption
+ attrs []attribute.KeyValue
+ sync bool
+ log bool
+}
+
+func (b *builder) RegisterInsecureOTelExporter(ctx context.Context, addr string, opts ...otlptracegrpc.Option) TracerProviderBuilder {
+ if len(addr) == 0 {
+ addr = "localhost:55680"
+ }
+
+ defaultOpts := []otlptracegrpc.Option{
+ otlptracegrpc.WithEndpoint(addr),
+ otlptracegrpc.WithInsecure(),
+ }
+ // Make sure to order the defaultOpts first, so opts can override the default ones
+ opts = append(defaultOpts, opts...)
+ // Run the main constructor for the otlptracegrpc exporter
+ exp, err := otlptracegrpc.New(ctx, opts...)
+ b.exporters = append(b.exporters, exp)
+ b.errs = append(b.errs, err)
+ return b
+}
+
+func (b *builder) RegisterInsecureJaegerExporter(addr string, opts ...jaeger.CollectorEndpointOption) TracerProviderBuilder {
+ defaultOpts := []jaeger.CollectorEndpointOption{}
+ // Only override if addr is set. Default is "http://localhost:14268/api/traces"
+ if len(addr) != 0 {
+ defaultOpts = append(defaultOpts, jaeger.WithEndpoint(addr))
+ }
+ // Make sure to order the defaultOpts first, so opts can override the default ones
+ opts = append(defaultOpts, opts...)
+ // Run the main constructor for the jaeger exporter
+ exp, err := jaeger.New(jaeger.WithCollectorEndpoint(opts...))
+ b.exporters = append(b.exporters, exp)
+ b.errs = append(b.errs, err)
+ return b
+}
+
+func (b *builder) RegisterStdoutExporter(opts ...stdouttrace.Option) TracerProviderBuilder {
+ defaultOpts := []stdouttrace.Option{
+ stdouttrace.WithPrettyPrint(),
+ }
+ // Make sure to order the defaultOpts first, so opts can override the default ones
+ opts = append(defaultOpts, opts...)
+ // Run the main constructor for the stdout exporter
+ exp, err := stdouttrace.New(opts...)
+ b.exporters = append(b.exporters, exp)
+ b.errs = append(b.errs, err)
+ return b
+}
+
+func (b *builder) WithOptions(opts ...tracesdk.TracerProviderOption) TracerProviderBuilder {
+ b.tpOpts = append(b.tpOpts, opts...)
+ return b
+}
+
+func (b *builder) WithAttributes(attrs ...attribute.KeyValue) TracerProviderBuilder {
+ b.attrs = append(b.attrs, attrs...)
+ return b
+}
+
+func (b *builder) WithSynchronousExports(sync bool) TracerProviderBuilder {
+ b.sync = sync
+ return b
+}
+
+func (b *builder) WithLogging(log bool) TracerProviderBuilder {
+ b.log = log
+ return b
+}
+
+var ErrNoExportersProvided = errors.New("no exporters provided")
+
+func (b *builder) Build() (SDKTracerProvider, error) {
+ // Combine and filter the errors from the exporter building
+ if err := multierr.Combine(b.errs...); err != nil {
+ return nil, err
+ }
+ if len(b.exporters) == 0 {
+ return nil, ErrNoExportersProvided
+ }
+
+ // By default, set the service name to "libgitops".
+ // This can be overridden through WithAttributes
+ defaultAttrs := []attribute.KeyValue{
+ semconv.ServiceNameKey.String("libgitops"),
+ }
+ // Make sure to order the defaultAttrs first, so b.attrs can override the default ones
+ attrs := append(defaultAttrs, b.attrs...)
+
+ // By default, register a resource with the given attributes
+ defaultTpOpts := []tracesdk.TracerProviderOption{
+ // Record information about this application in an Resource.
+ tracesdk.WithResource(resource.NewWithAttributes(semconv.SchemaURL, attrs...)),
+ }
+
+ // Register all exporters with the options list
+ for _, exporter := range b.exporters {
+ // The non-syncing mode shall only be used in testing. The batching mode must be used in production.
+ if b.sync {
+ defaultTpOpts = append(defaultTpOpts, tracesdk.WithSyncer(exporter))
+ } else {
+ defaultTpOpts = append(defaultTpOpts, tracesdk.WithBatcher(exporter))
+ }
+ }
+
+ // Make sure to order the defaultTpOpts first, so b.tpOpts can override the default ones
+ opts := append(defaultTpOpts, b.tpOpts...)
+ // Build the tracing provider
+ tpsdk := tracesdk.NewTracerProvider(opts...)
+ if b.log {
+ return NewLoggingTracerProvider(tpsdk), nil
+ }
+ return tpsdk, nil
+}
+
+func (b *builder) InstallGlobally() error {
+ // First, build the tracing provider...
+ tp, err := b.Build()
+ if err != nil {
+ return err
+ }
+ // ... and register it globally
+ otel.SetTracerProvider(tp)
+ return nil
+}
+
+// Shutdown tries to convert the trace.TracerProvider to a SDKTracerProvider to
+// access its Shutdown method to make sure all traces have been flushed using the exporters
+// before it is shut down. If timeout == 0, the shutdown is done without a grace period.
+// If timeout > 0, the shutdown is given a grace period of that duration.
+func Shutdown(ctx context.Context, tp trace.TracerProvider, timeout time.Duration) error {
+ return callSDKProvider(ctx, tp, timeout, func(ctx context.Context, sp SDKTracerProvider) error {
+ return sp.Shutdown(ctx)
+ })
+}
+
+// ForceFlush tries to convert the trace.TracerProvider to a SDKTracerProvider to
+// access its ForceFlush method to make sure all traces have been flushed using the exporters.
+// If timeout == 0, the flushing will be done without a grace period.
+// If timeout > 0, the flushing will have a grace period of that period of time.
+// Unlike Shutdown, which also flushes the traces, the provider is still operational after this.
+func ForceFlush(ctx context.Context, tp trace.TracerProvider, timeout time.Duration) error {
+ return callSDKProvider(ctx, tp, timeout, func(ctx context.Context, sp SDKTracerProvider) error {
+ return sp.ForceFlush(ctx)
+ })
+}
+
+func callSDKProvider(ctx context.Context, tp trace.TracerProvider, timeout time.Duration, fn func(context.Context, SDKTracerProvider) error) error {
+ p, ok := tp.(SDKTracerProvider)
+ if !ok {
+ return nil
+ }
+
+ if timeout != 0 {
+ // Do not make the application hang when it is shutdown.
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithTimeout(ctx, timeout)
+ defer cancel()
+ }
+
+ return fn(ctx, p)
+}
+
+// ShutdownGlobal shuts down the global TracerProvider using Shutdown()
+func ShutdownGlobal(ctx context.Context, timeout time.Duration) error {
+ return Shutdown(ctx, otel.GetTracerProvider(), timeout)
+}
+
+// ForceFlushGlobal flushes the global TracerProvider using ForceFlush()
+func ForceFlushGlobal(ctx context.Context, timeout time.Duration) error {
+ return ForceFlush(ctx, otel.GetTracerProvider(), timeout)
+}
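// A minimal usage sketch (illustration, not part of this patch): building and
// globally installing a provider with the builder above. The stdout exporter and
// the 5-second grace period are arbitrary choices for the example.
package main

import (
	"context"
	"time"

	"github.com/weaveworks/libgitops/pkg/tracing"
)

func main() {
	if err := tracing.NewBuilder().
		RegisterStdoutExporter(). // pretty-printed traces on os.Stdout
		WithLogging(true).        // also mirror span activity to the logger
		InstallGlobally(); err != nil {
		panic(err)
	}
	// Flush and shut down the global provider on exit, with a grace period.
	defer func() { _ = tracing.ShutdownGlobal(context.Background(), 5*time.Second) }()
}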
diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go
new file mode 100644
index 00000000..e8dbc12e
--- /dev/null
+++ b/pkg/tracing/tracing.go
@@ -0,0 +1,244 @@
+package tracing
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+
+ "go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/trace"
+ "go.uber.org/multierr"
+ "k8s.io/utils/pointer"
+)
+
+// TODO: Make a SpanProcessor that can output relevant YAML based on what's happening, for
+// unit testing.
+
+// FuncTracer is a higher-level type than the core trace.Tracer, which allows instrumenting
+// a function running in a closure. It'll automatically create a span with the given name
+// (plus maybe a pre-configured prefix). TraceFunc also returns a TraceFuncResult which allows
+// the error to be instrumented automatically as well.
+type FuncTracer interface {
+ trace.Tracer
+ // TraceFunc creates a trace with the given name while fn is executing.
+ // ErrFuncNotSupplied is returned if fn is nil.
+ TraceFunc(ctx context.Context, spanName string, fn TraceFunc, opts ...trace.SpanStartOption) TraceFuncResult
+}
+
+// FuncTracerFromGlobal returns a new FuncTracer with the given name that uses the globally-registered
+// tracing provider.
+func FuncTracerFromGlobal(name string) FuncTracer {
+ return TracerOptions{Name: name, UseGlobal: pointer.Bool(true)}
+}
+
+// BackgroundTracingContext returns a background context whose span defers to the global TracerProvider.
+func BackgroundTracingContext() context.Context {
+ ctx := context.Background()
+ noopSpan := trace.SpanFromContext(ctx)
+ return trace.ContextWithSpan(ctx, &tracerProviderSpan{noopSpan, true})
+}
+
+type tracerProviderSpan struct {
+ trace.Span
+ useGlobal bool
+}
+
+// Override the TracerProvider call if useGlobal is set
+func (s *tracerProviderSpan) TracerProvider() trace.TracerProvider {
+ if s.useGlobal {
+ return otel.GetTracerProvider()
+ }
+ return s.Span.TracerProvider()
+}
+
+type TracerNamed interface {
+ TracerName() string
+}
+
+// FromContext returns a FuncTracer named after obj, using the TracerProvider of the span in ctx.
+func FromContext(ctx context.Context, obj interface{}) FuncTracer {
+ name := ""
+ // TODO: Use a switch clause
+ tr, isTracerNamed := obj.(TracerNamed)
+ str, isString := obj.(string)
+ if isTracerNamed {
+ name = tr.TracerName()
+ } else if isString {
+ name = str
+ } else if obj != nil {
+ name = fmt.Sprintf("%T", obj)
+ }
+
+ switch obj {
+ case os.Stdin:
+ name = "os.Stdin"
+ case os.Stdout:
+ name = "os.Stdout"
+ case os.Stderr:
+ name = "os.Stderr"
+ case io.Discard:
+ name = "io.Discard"
+ }
+
+ return TracerOptions{Name: name, provider: trace.SpanFromContext(ctx).TracerProvider()}
+}
+
+func FromContextUnnamed(ctx context.Context) FuncTracer {
+ return FromContext(ctx, "")
+}
+
+// TraceFuncResult can simply return the error from TraceFunc, register the error using
+// DefaultErrRegisterFunc (and then return it), or register the error using a custom error handling function.
+type TraceFuncResult interface {
+ // Error returns the error without any registration of it to the span.
+ Error() error
+ // Register registers the error using DefaultErrRegisterFunc.
+ Register() error
+ // RegisterCustom registers the error with the span using fn.
+ // ErrFuncNotSupplied is returned if fn is nil.
+ RegisterCustom(fn ErrRegisterFunc) error
+}
+
+// ErrFuncNotSupplied is raised when a supplied function callback is nil.
+var ErrFuncNotSupplied = errors.New("function argument not supplied")
+
+// MakeFuncNotSuppliedError formats ErrFuncNotSupplied in a standard way.
+func MakeFuncNotSuppliedError(name string) error {
+ return fmt.Errorf("%w: %s", ErrFuncNotSupplied, name)
+}
+
+// TraceFunc represents an instrumented function closure.
+type TraceFunc func(context.Context, trace.Span) error
+
+// ErrRegisterFunc should register the return error of TraceFunc err with the span
+type ErrRegisterFunc func(span trace.Span, err error)
+
+// TracerOptions implements trace.Tracer and FuncTracer.
+//var _ TracerOption = TracerOptions{}
+var _ trace.Tracer = TracerOptions{}
+var _ FuncTracer = TracerOptions{}
+
+// TracerOptions contains options for creating a trace.Tracer and FuncTracer.
+type TracerOptions struct {
+ // Name, if set to a non-empty value, will serve as the prefix for spans generated
+ // using the FuncTracer as "{o.Name}.{spanName}" (otherwise just "{spanName}"), and
+ // as the name of the trace.Tracer.
+ Name string
+ // UseGlobal specifies to default to the global tracing provider if true
+ // (or, just use a no-op TracerProvider, if false). This only applies if neither
+ // WithTracer nor WithTracerProvider has been supplied.
+ UseGlobal *bool
+ // provider is what TracerProvider to use for creating a tracer. If nil,
+ // trace.NewNoopTracerProvider() is used.
+ provider trace.TracerProvider
+ // tracer can be set to use a specific tracer in Start(). If nil, a
+ // tracer is created using the provider.
+ tracer trace.Tracer
+}
+
+func (o TracerOptions) ApplyToTracer(target *TracerOptions) {
+ if len(o.Name) != 0 {
+ target.Name = o.Name
+ }
+ if o.UseGlobal != nil {
+ target.UseGlobal = o.UseGlobal
+ }
+ if o.provider != nil {
+ target.provider = o.provider
+ }
+ if o.tracer != nil {
+ target.tracer = o.tracer
+ }
+}
+
+// fmtSpanName appends the name of the given function (spanName) to the given
+// o.Name, if set. The return value of this function is used as
+// the name of the span, which will then be of the form "{o.Name}.{spanName}",
+// or just "{spanName}".
+func (o TracerOptions) fmtSpanName(spanName string) string {
+ // TODO: Does this match the other logic in FromContext?
+ if len(o.Name) != 0 && len(spanName) != 0 {
+ return o.Name + "." + spanName
+ }
+ // As either (or both) of o.Name and spanName is empty, it is safe to concatenate them
+ name := o.Name + spanName
+ if len(name) != 0 {
+ return name
+ }
+ return "unnamed_span"
+}
+
+func (o TracerOptions) tracerProvider() trace.TracerProvider {
+ if o.provider != nil {
+ return o.provider
+ } else if o.UseGlobal != nil && *o.UseGlobal {
+ return otel.GetTracerProvider()
+ } else {
+ return trace.NewNoopTracerProvider()
+ }
+}
+
+func (o TracerOptions) getTracer() trace.Tracer {
+ if o.tracer == nil {
+ o.tracer = o.tracerProvider().Tracer(o.Name)
+ }
+ return o.tracer
+}
+
+func (o TracerOptions) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
+ return o.getTracer().Start(ctx, o.fmtSpanName(spanName), opts...)
+}
+
+func (o TracerOptions) TraceFunc(ctx context.Context, spanName string, fn TraceFunc, opts ...trace.SpanStartOption) TraceFuncResult {
+ ctx, span := o.Start(ctx, spanName, opts...)
+ // The span is ended in the returned TraceFuncResult, so that the error can be
+ // registered before the span stops recording
+
+ // Catch if fn == nil
+ if fn == nil {
+ return &traceFuncResult{MakeFuncNotSuppliedError("FuncTracer.TraceFunc"), span}
+ }
+
+ return &traceFuncResult{fn(ctx, span), span}
+}
+
+// Important: always call one of the TraceFuncResult methods, otherwise the span
+// won't be ended
+type traceFuncResult struct {
+ err error
+ span trace.Span
+}
+
+func (r *traceFuncResult) Error() error {
+ // Important: Remember to end the span
+ r.span.End()
+ return r.err
+}
+
+func (r *traceFuncResult) Register() error {
+ return r.RegisterCustom(DefaultErrRegisterFunc)
+}
+
+func (r *traceFuncResult) RegisterCustom(fn ErrRegisterFunc) error {
+ if fn == nil {
+ err := multierr.Combine(r.err, MakeFuncNotSuppliedError("TraceFuncResult.RegisterCustom"))
+ DefaultErrRegisterFunc(r.span, err)
+ return err
+ }
+
+ // Register the error with the span, and potentially process it.
+ fn(r.span, r.err)
+ // Important: Remember to end the span
+ r.span.End()
+ return r.err
+}
+
+// DefaultErrRegisterFunc registers the error with the span using span.RecordError(err)
+// if the error is non-nil.
+func DefaultErrRegisterFunc(span trace.Span, err error) {
+ if err != nil {
+ span.RecordError(err)
+ }
+}
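// A minimal usage sketch (illustration, not part of this patch): instrumenting a
// closure with FuncTracer. The span is named "example.doWork" per fmtSpanName, and
// Register() records any returned error on the span before ending it.
package main

import (
	"context"

	"github.com/weaveworks/libgitops/pkg/tracing"
	"go.opentelemetry.io/otel/trace"
)

func doWork(ctx context.Context) error {
	return tracing.FuncTracerFromGlobal("example").TraceFunc(ctx, "doWork",
		func(ctx context.Context, span trace.Span) error {
			span.AddEvent("doing the work")
			return nil // a non-nil error here would be recorded by Register()
		}).Register()
}

func main() {
	if err := doWork(tracing.BackgroundTracingContext()); err != nil {
		panic(err)
	}
}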
diff --git a/pkg/tracing/tracing_test.go b/pkg/tracing/tracing_test.go
new file mode 100644
index 00000000..b03ca396
--- /dev/null
+++ b/pkg/tracing/tracing_test.go
@@ -0,0 +1,65 @@
+package tracing
+
+/*func TestTracerOptions_getTracer(t *testing.T) {
+ tests := []struct {
+ name string
+ global trace.TracerProvider
+ opts []TracerOption
+ want trace.Tracer
+ }{
+ {
+ name: "empty",
+ opts: []TracerOption{TracerOptions{}},
+ want: trace.NewNoopTracerProvider().Tracer(""),
+ },
+ {
+ name: "with name",
+ opts: []TracerOption{TracerOptions{Name: "foo"}},
+ want: trace.NewNoopTracerProvider().Tracer("foo"),
+ },
+ {
+ name: "use global",
+ global: customTp{},
+ opts: []TracerOption{TracerOptions{Name: "foo", UseGlobal: pointer.BoolPtr(true)}},
+ want: trace.NewNoopTracerProvider().Tracer("custom-foo"),
+ },
+ {
+ name: "use global",
+ global: customTp{},
+ opts: []TracerOption{TracerOptions{Name: "foo", UseGlobal: pointer.BoolPtr(true)}},
+ want: trace.NewNoopTracerProvider().Tracer("custom-foo"),
+ },
+ {
+ name: "use custom tp",
+ opts: []TracerOption{TracerOptions{Name: "foo"}, WithTracerProvider(customTp{})},
+ want: trace.NewNoopTracerProvider().Tracer("custom-foo"),
+ },
+ {
+ name: "use custom tracer",
+ opts: []TracerOption{TracerOptions{Name: "foo"}, WithTracer(customTp{}.Tracer("custom-bar"))},
+ want: customTp{}.Tracer("custom-bar"),
+ },
+ }
+ for _, tt := range tests {
+ earlierTp := otel.GetTracerProvider()
+ if tt.global != nil {
+ otel.SetTracerProvider(tt.global)
+ }
+ o := TracerOptions{}
+ for _, opt := range tt.opts {
+ opt.ApplyToTracer(&o)
+ }
+ got := o.getTracer()
+ assert.Equal(t, tt.want, got)
+ if tt.global != nil {
+ otel.SetTracerProvider(earlierTp)
+ }
+ }
+}
+
+type customTp struct{}
+
+func (customTp) Tracer(instrumentationName string, opts ...trace.TracerOption) trace.Tracer {
+ return trace.NewNoopTracerProvider().Tracer("custom-" + instrumentationName)
+}
+*/
diff --git a/pkg/util/compositeio/compositeio.go b/pkg/util/compositeio/compositeio.go
new file mode 100644
index 00000000..dcbca21f
--- /dev/null
+++ b/pkg/util/compositeio/compositeio.go
@@ -0,0 +1,38 @@
+package compositeio
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/weaveworks/libgitops/pkg/tracing"
+)
+
+func ReadCloser(r io.Reader, c io.Closer) io.ReadCloser {
+ return readCloser{r, c}
+}
+
+type readCloser struct {
+ io.Reader
+ io.Closer
+}
+
+func (rc readCloser) TracerName() string {
+ return fmt.Sprintf("compositeio.readCloser{%T, %T}", rc.Reader, rc.Closer)
+}
+
+var _ tracing.TracerNamed = readCloser{}
+
+func WriteCloser(w io.Writer, c io.Closer) io.WriteCloser {
+ return writeCloser{w, c}
+}
+
+type writeCloser struct {
+ io.Writer
+ io.Closer
+}
+
+func (wc writeCloser) TracerName() string {
+ return fmt.Sprintf("compositeio.writeCloser{%T, %T}", wc.Writer, wc.Closer)
+}
+
+var _ tracing.TracerNamed = writeCloser{}
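// A minimal usage sketch (illustration, not part of this patch): composing an
// io.ReadCloser from separate reader and closer halves. The gzip-over-file pairing
// and the file path are assumptions chosen for the example.
package main

import (
	"compress/gzip"
	"io"
	"os"

	"github.com/weaveworks/libgitops/pkg/util/compositeio"
)

func openGzipped(path string) (io.ReadCloser, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	gz, err := gzip.NewReader(f)
	if err != nil {
		_ = f.Close()
		return nil, err
	}
	// Reads come from the gzip stream; Close() closes the underlying file.
	return compositeio.ReadCloser(gz, f), nil
}

func main() {
	rc, err := openGzipped("example.txt.gz") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	_, _ = io.Copy(os.Stdout, rc)
}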
diff --git a/pkg/util/fs.go b/pkg/util/fs.go
deleted file mode 100644
index 3e1f7d45..00000000
--- a/pkg/util/fs.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package util
-
-import (
- "os"
-)
-
-func PathExists(path string) (bool, os.FileInfo) {
- info, err := os.Stat(path)
- if os.IsNotExist(err) {
- return false, nil
- }
-
- return true, info
-}
-
-func FileExists(filename string) bool {
- exists, info := PathExists(filename)
- if !exists {
- return false
- }
-
- return !info.IsDir()
-}
diff --git a/pkg/util/limitedio/limitedio.go b/pkg/util/limitedio/limitedio.go
new file mode 100644
index 00000000..745c955c
--- /dev/null
+++ b/pkg/util/limitedio/limitedio.go
@@ -0,0 +1,178 @@
+package limitedio
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "strconv"
+
+ "github.com/weaveworks/libgitops/pkg/util/structerr"
+)
+
+// DefaultMaxReadSize is 3 MB, which matches the default behavior of Kubernetes.
+// (The API server only accepts request bodies of 3MB by default.)
+const DefaultMaxReadSize Limit = 3 * 1024 * 1024
+const Infinite Limit = -1
+
+type Limit int64
+
+func (l Limit) String() string {
+ if l <= 0 {
+ return "infinite"
+ }
+ return strconv.FormatInt(int64(l), 10)
+}
+func (l Limit) Int64() int64 { return int64(l) }
+func (l Limit) Int() (int, error) {
+ i := int(l)
+ if int64(i) != int64(l) {
+ return 0, errors.New("the limit overflows int")
+ }
+ return i, nil
+}
+
+func (l Limit) IsLessThan(len int64) bool {
+ // l <= 0 means "l is infinite" => limit is larger than len => not less than len
+ if l <= 0 {
+ return false
+ }
+ return l.Int64() < len
+}
+
+func (l Limit) IsLessThanOrEqual(len int64) bool {
+ // l <= 0 means "l is infinite" => limit is larger than len => not less than or equal to len
+ if l <= 0 {
+ return false
+ }
+ return l.Int64() <= len
+}
+
+// ErrReadSizeOverflow returns a new *ReadSizeOverflowError
+func ErrReadSizeOverflow(maxReadSize Limit) *ReadSizeOverflowError {
+ return &ReadSizeOverflowError{MaxReadSize: maxReadSize}
+}
+
+// Enforce all struct errors implementing structerr.StructError
+var _ structerr.StructError = &ReadSizeOverflowError{}
+
+// ReadSizeOverflowError describes that a read or write has grown larger than
+// allowed. It is up to the implementer to describe what a "frame" in this
+// context is. This error is e.g. returned from the NewReader implementation.
+// If MaxReadSize is non-zero, it is included in the error text.
+//
+// This error can be checked for equality using errors.Is(err, &ReadSizeOverflowError{})
+type ReadSizeOverflowError struct {
+ // +optional
+ MaxReadSize Limit
+}
+
+func (e *ReadSizeOverflowError) Error() string {
+ msg := "frame was larger than maximum allowed size"
+ if e.MaxReadSize != 0 {
+ msg = fmt.Sprintf("%s %d bytes", msg, e.MaxReadSize)
+ }
+ return msg
+}
+
+func (e *ReadSizeOverflowError) Is(target error) bool {
+ _, ok := target.(*ReadSizeOverflowError)
+ return ok
+}
+
+// Reader is a specialized io.Reader helper type, which allows detecting when
+// a read grows larger than the allowed maxReadSize, returning an ErrReadSizeOverflow in that case.
+//
+// Internally there's a byte counter registering how many bytes have been read using the io.Reader
+// across all Read calls since the last ResetCounter reset, which resets the byte counter to 0. This
+// means that if you have successfully read one frame within bounds of maxReadSize, and want to
+// re-use the underlying io.Reader for the next frame, you shall run ResetCounter to start again.
+//
+// maxReadSize is specified when constructing a Reader, and defaults to DefaultMaxReadSize
+// if left as the empty value 0.
+// If maxReadSize is negative, the reader transparently forwards all calls without any restrictions.
+//
+// Note: The Reader implementation is not thread-safe; thread safety is for
+// higher-level interfaces to implement and ensure.
+type Reader interface {
+ // The byte count returned across consecutive Read(p) calls is at most maxReadSize, until reset
+ // by ResetCounter.
+ io.Reader
+ // ResetCounter resets the byte counter counting how many bytes have been read using Read(p)
+ ResetCounter()
+}
+
+// NewReader makes a new Reader implementation.
+func NewReader(r io.Reader, maxReadSize Limit) Reader {
+ // Default maxReadSize if unset.
+ if maxReadSize == 0 {
+ maxReadSize = DefaultMaxReadSize
+ }
+
+ return &ioLimitedReader{
+ reader: r,
+ buf: new(bytes.Buffer),
+ maxReadSize: maxReadSize,
+ }
+}
+
+type ioLimitedReader struct {
+ reader io.Reader
+ buf *bytes.Buffer
+ maxReadSize Limit
+ byteCounter int64
+}
+
+func (l *ioLimitedReader) Read(b []byte) (int, error) {
+ // If l.maxReadSize is negative, put no restrictions on the read
+ maxReadSize := l.maxReadSize.Int64()
+ if maxReadSize < 0 {
+ return l.reader.Read(b)
+ }
+ // If we've already read more than we're allowed to, return an overflow error
+ if l.byteCounter > maxReadSize {
+ // Keep returning this error as long as relevant
+ return 0, ErrReadSizeOverflow(l.maxReadSize)
+
+ } else if l.byteCounter == maxReadSize {
+ // At this point we're not sure if the frame actually stops here or not
+ // To figure that out, read one more byte into tmp
+ tmp := make([]byte, 1)
+ tmpn, err := l.reader.Read(tmp)
+
+ // Write the read byte into the persistent buffer, for later use when l.byteCounter < l.maxReadSize
+ _, _ = l.buf.Write(tmp[:tmpn])
+ // Increase the byteCounter, as bytes written to buf counts as "read"
+ l.byteCounter += int64(tmpn)
+
+ // If no bytes were read, it's OK, as we didn't exceed the limit. Return
+ // the error; often nil or io.EOF in this case.
+ if tmpn == 0 {
+ return 0, err
+ }
+ // Return that the frame overflowed now, as we were able to read the byte (tmpn must be 1)
+ return 0, ErrReadSizeOverflow(l.maxReadSize)
+ } // else l.byteCounter < l.maxReadSize
+
+ // We can at maximum read bytesLeft bytes more, shrink b accordingly if b is larger than the
+ // maximum allowed amount to read.
+ bytesLeft := maxReadSize - l.byteCounter
+ if int64(len(b)) > bytesLeft {
+ b = b[:bytesLeft]
+ }
+
+ // First, flush any bytes in the buffer. By convention, the writes to buf have already
+ // increased byteCounter, so no need to do that now. No need to check the error as buf
+ // only returns io.EOF, which is harmless and even expected in most cases.
+ m, _ := l.buf.Read(b)
+ // Move the b slice forward m bytes as the m first bytes of b have now been populated
+ b = b[m:]
+
+ // Read from the reader into the rest of b
+ n, err := l.reader.Read(b)
+ // Register how many bytes have been read now additionally
+ l.byteCounter += int64(n)
+ return n, err
+}
+
+func (r *ioLimitedReader) ResetCounter() { r.byteCounter = 0 }
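// A minimal usage sketch (illustration, not part of this patch): capping a read at
// 8 bytes and detecting overflow via errors.Is against the struct error above.
package main

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/weaveworks/libgitops/pkg/util/limitedio"
)

func main() {
	r := limitedio.NewReader(strings.NewReader("0123456789"), limitedio.Limit(8))
	if _, err := io.ReadAll(r); errors.Is(err, &limitedio.ReadSizeOverflowError{}) {
		fmt.Println("frame too large:", err) // the 9th byte overflows the limit
	}
	// ResetCounter() would allow reading the next frame with a fresh byte budget.
}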
diff --git a/pkg/util/patch/patch.go b/pkg/util/patch/patch.go
index 11c29ea8..535be559 100644
--- a/pkg/util/patch/patch.go
+++ b/pkg/util/patch/patch.go
@@ -1,103 +1,88 @@
package patch
import (
- "bytes"
+ "encoding/json"
"fmt"
- "io/ioutil"
- "github.com/weaveworks/libgitops/pkg/runtime"
- "github.com/weaveworks/libgitops/pkg/serializer"
- "k8s.io/apimachinery/pkg/runtime/schema"
+ jsonbytepatcher "github.com/evanphx/json-patch"
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
)
-type Patcher interface {
- Create(new runtime.Object, applyFn func(runtime.Object) error) ([]byte, error)
- Apply(original, patch []byte, gvk schema.GroupVersionKind) ([]byte, error)
- ApplyOnFile(filePath string, patch []byte, gvk schema.GroupVersionKind) error
+// BytePatcherForType returns the right BytePatcher for the given
+// patch type.
+//
+// Note: if patchType is unknown, the return value will be nil, so make
+// sure you check the BytePatcher is non-nil before using it!
+func BytePatcherForType(patchType types.PatchType) BytePatcher {
+ switch patchType {
+ case types.JSONPatchType:
+ return JSONBytePatcher{}
+ case types.MergePatchType:
+ return MergeBytePatcher{}
+ case types.StrategicMergePatchType:
+ return StrategicMergeBytePatcher{}
+ default:
+ return nil
+ }
}
-func NewPatcher(s serializer.Serializer) Patcher {
- return &patcher{serializer: s}
-}
+// maxJSONBytePatcherOperations is the maximum number of operations a single JSON patch may contain.
+const maxJSONBytePatcherOperations = 10000
-type patcher struct {
- serializer serializer.Serializer
+type BytePatcher interface {
+ // TODO: SupportedType() types.PatchType
+ // Apply applies patchJSON to currentJSON; both must be versioned bytes of the
+ // same GVK. The schema is only consulted for strategic merge patches.
+ Apply(currentJSON, patchJSON []byte, schema strategicpatch.LookupPatchMeta) ([]byte, error)
}
-// Create is a helper that creates a patch out of the change made in applyFn
-func (p *patcher) Create(new runtime.Object, applyFn func(runtime.Object) error) (patchBytes []byte, err error) {
- var oldBytes, newBytes bytes.Buffer
- encoder := p.serializer.Encoder()
- old := new.DeepCopyObject().(runtime.Object)
-
- if err = encoder.Encode(serializer.NewJSONFrameWriter(&oldBytes), old); err != nil {
- return
- }
-
- if err = applyFn(new); err != nil {
- return
- }
-
- if err = encoder.Encode(serializer.NewJSONFrameWriter(&newBytes), new); err != nil {
- return
+type JSONBytePatcher struct{}
+
+func (JSONBytePatcher) Apply(currentJSON, patchJSON []byte, _ strategicpatch.LookupPatchMeta) ([]byte, error) {
+ // sanity check potentially abusive patches
+ // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
+ // TODO(luxas): Go v1.15 has the above mentioned patch, what needs changing now?
+ if len(patchJSON) > 1024*1024 {
+ v := []interface{}{}
+ if err := json.Unmarshal(patchJSON, &v); err != nil {
+ return nil, fmt.Errorf("error decoding patch: %v", err)
+ }
}
- emptyObj, err := p.serializer.Scheme().New(old.GetObjectKind().GroupVersionKind())
- if err != nil {
- return
- }
-
- patchBytes, err = strategicpatch.CreateTwoWayMergePatch(oldBytes.Bytes(), newBytes.Bytes(), emptyObj)
- if err != nil {
- return nil, fmt.Errorf("CreateTwoWayMergePatch failed: %v", err)
- }
-
- return patchBytes, nil
-}
-
-func (p *patcher) Apply(original, patch []byte, gvk schema.GroupVersionKind) ([]byte, error) {
- emptyObj, err := p.serializer.Scheme().New(gvk)
+ patchObj, err := jsonbytepatcher.DecodePatch(patchJSON)
if err != nil {
return nil, err
}
-
- b, err := strategicpatch.StrategicMergePatch(original, patch, emptyObj)
- if err != nil {
- return nil, err
+ if len(patchObj) > maxJSONBytePatcherOperations {
+ return nil, errors.NewRequestEntityTooLargeError(
+ fmt.Sprintf("The allowed maximum operations in a JSON patch is %d, got %d",
+ maxJSONBytePatcherOperations, len(patchObj)))
}
-
- return p.serializerEncode(b)
+ return patchObj.Apply(currentJSON)
}
-func (p *patcher) ApplyOnFile(filePath string, patch []byte, gvk schema.GroupVersionKind) error {
- oldContent, err := ioutil.ReadFile(filePath)
- if err != nil {
- return err
- }
-
- newContent, err := p.Apply(oldContent, patch, gvk)
- if err != nil {
- return err
+type MergeBytePatcher struct{}
+
+func (MergeBytePatcher) Apply(currentJSON, patchJSON []byte, _ strategicpatch.LookupPatchMeta) ([]byte, error) {
+ // sanity check potentially abusive patches
+ // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789)
+ // TODO(luxas): Go v1.15 has the above mentioned patch, what needs changing now?
+ if len(patchJSON) > 1024*1024 {
+ v := map[string]interface{}{}
+ if err := json.Unmarshal(patchJSON, &v); err != nil {
+ return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err))
+ }
}
- return ioutil.WriteFile(filePath, newContent, 0644)
+ return jsonbytepatcher.MergePatch(currentJSON, patchJSON)
}
-// StrategicMergePatch returns an unindented, unorganized JSON byte slice,
-// this helper takes that as an input and returns the same JSON re-encoded
-// with the serializer so it conforms to a runtime.Object
-// TODO: Just use encoding/json.Indent here instead?
-func (p *patcher) serializerEncode(input []byte) ([]byte, error) {
- obj, err := p.serializer.Decoder().Decode(serializer.NewJSONFrameReader(serializer.FromBytes(input)))
- if err != nil {
- return nil, err
- }
-
- var result bytes.Buffer
- if err := p.serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&result), obj); err != nil {
- return nil, err
- }
+type StrategicMergeBytePatcher struct{}
- return result.Bytes(), err
+func (StrategicMergeBytePatcher) Apply(currentJSON, patchJSON []byte, schema strategicpatch.LookupPatchMeta) ([]byte, error) {
+ // TODO: Also check for overflow here?
+ // TODO: What to do when schema is nil? error?
+ return strategicpatch.StrategicMergePatchUsingLookupPatchMeta(currentJSON, patchJSON, schema)
}
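// A minimal usage sketch (illustration, not part of this patch): applying an
// RFC 7386 merge patch through the type-dispatched BytePatcher. The JSON documents
// are arbitrary example data.
package main

import (
	"fmt"

	"github.com/weaveworks/libgitops/pkg/util/patch"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	current := []byte(`{"spec":{"paused":false,"replicas":1}}`)
	patchJSON := []byte(`{"spec":{"replicas":3}}`)

	p := patch.BytePatcherForType(types.MergePatchType)
	if p == nil {
		panic("unknown patch type") // nil is returned for unknown types
	}
	out, err := p.Apply(current, patchJSON, nil) // merge patches need no schema
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // e.g. {"spec":{"paused":false,"replicas":3}}
}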
diff --git a/pkg/util/patch/patch_test.go b/pkg/util/patch/patch_test.go
index 9a3cf542..c9d1b01b 100644
--- a/pkg/util/patch/patch_test.go
+++ b/pkg/util/patch/patch_test.go
@@ -1,5 +1,9 @@
package patch
+/*
+
+TODO: Create good unit tests for this package!
+
import (
"bytes"
"testing"
@@ -58,3 +62,4 @@ func TestApplyPatch(t *testing.T) {
t.Fatal(err)
}
}
+*/
diff --git a/pkg/util/structerr/structerr.go b/pkg/util/structerr/structerr.go
new file mode 100644
index 00000000..e135b640
--- /dev/null
+++ b/pkg/util/structerr/structerr.go
@@ -0,0 +1,13 @@
+package structerr
+
+// StructError is an interface for errors that are structs, and can be compared for
+// errors.Is equality. Equality is determined by type equality, i.e. if the pointer
+// receiver is *MyError and target can be successfully cast using target.(*MyError),
+// then target and the pointer receiver error are equal, otherwise not.
+//
+// This is needed because errors.Is does not support equality like this for structs
+// by default.
+type StructError interface {
+ error
+ Is(target error) bool
+}
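// A minimal sketch (illustration, not part of this patch): an error type satisfying
// StructError, so errors.Is matches on type rather than on pointer identity. The
// NotFoundError type is hypothetical example code.
package main

import (
	"errors"
	"fmt"
)

type NotFoundError struct{ Name string }

func (e *NotFoundError) Error() string { return fmt.Sprintf("%q not found", e.Name) }

// Is implements the type-equality contract StructError documents.
func (e *NotFoundError) Is(target error) bool {
	_, ok := target.(*NotFoundError)
	return ok
}

func main() {
	err := fmt.Errorf("lookup failed: %w", &NotFoundError{Name: "my-car"})
	fmt.Println(errors.Is(err, &NotFoundError{})) // true, despite differing fields
}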
diff --git a/pkg/util/sync/lock.go b/pkg/util/sync/lock.go
new file mode 100644
index 00000000..488046bb
--- /dev/null
+++ b/pkg/util/sync/lock.go
@@ -0,0 +1,107 @@
+package sync
+
+import (
+ "sync"
+)
+
+type NamedLockMap interface {
+ LockByName(name string) LockWithData
+}
+
+type LockWithData interface {
+ Load(key interface{}) (value interface{}, ok bool)
+ QLoad(key interface{}) interface{}
+
+ // These automatically do a Lock()/Unlock() when executing
+ LoadOrStore(key, value interface{}) (actual interface{}, loaded bool)
+ QLoadOrStore(key, value interface{}) interface{}
+ Store(key, value interface{})
+
+ sync.Locker
+
+ RLocker() sync.Locker
+ RLock()
+ RUnlock()
+
+ /*RLock(key string) KeyedLockGetter
+ RUnlock(key string)
+
+ Lock(key string) KeyedLockSetter
+ Unlock(key string)*/
+}
+
+/*type KeyedLockGetter interface {
+ Get(key interface{}) interface{}
+}
+
+type KeyedLockSetter interface {
+ KeyedLockGetter
+ Set(key, value interface{})
+}*/
+
+func NewNamedLockMap() NamedLockMap {
+ return &namedLockMap{
+ locks: make(map[string]*lockWithData),
+ locksMu: &sync.Mutex{},
+ }
+}
+
+type namedLockMap struct {
+ // locks maps keys to their individual locks and associated data
+ locks map[string]*lockWithData
+ // locksMu guards reads and writes of the locks map
+ locksMu *sync.Mutex
+}
+
+func (l *namedLockMap) LockByName(name string) LockWithData {
+ // l.locksMu guards reads and writes of the l.locks map
+ l.locksMu.Lock()
+ defer l.locksMu.Unlock()
+
+ // Check if a lock for this name already exists.
+ lock, ok := l.locks[name]
+ if ok {
+ return lock
+ }
+ // If not, grow the locks map by one and return the new entry
+ l.locks[name] = &lockWithData{
+ RWMutex: &sync.RWMutex{},
+ Map: &sync.Map{},
+ }
+ return l.locks[name]
+}
+
+type lockWithData struct {
+ *sync.RWMutex
+ *sync.Map
+ //data map[interface{}]interface{}
+}
+
+func (l *lockWithData) QLoad(key interface{}) interface{} {
+ value, _ := l.Map.Load(key)
+ return value
+}
+
+func (l *lockWithData) QLoadOrStore(key, value interface{}) interface{} {
+ actual, _ := l.Map.LoadOrStore(key, value)
+ return actual
+}
+
+/*
+func (l *lockWithData) RLock() { l.mu.RLock() }
+func (l *lockWithData) RUnlock() { l.mu.RUnlock() }
+func (l *lockWithData) Lock() { l.mu.Lock() }
+func (l *lockWithData) Unlock() { l.mu.Unlock() }
+*/
+
+/*func (l *lockWithData) Get(key interface{}) interface{} {
+ return l.data[key]
+}
+
+type writableLockWithData struct {
+ *lockWithData
+}
+
+func (l *writableLockWithData) Set(key, value interface{}) {
+ l.data[key] = value
+}*/
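// A minimal usage sketch (illustration, not part of this patch): serializing work
// per name while keeping scratch data in the lock's embedded sync.Map. The "main"
// key and "head" entry are arbitrary example values.
package main

import (
	"fmt"

	"github.com/weaveworks/libgitops/pkg/util/sync"
)

func main() {
	locks := sync.NewNamedLockMap()

	l := locks.LockByName("main") // the same name always yields the same lock
	l.Lock()
	l.Store("head", "abc123") // per-name data, guarded by the write lock
	l.Unlock()

	l.RLock()
	fmt.Println(l.QLoad("head")) // abc123
	l.RUnlock()
}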
diff --git a/pkg/util/sync/monitor.go b/pkg/util/sync/monitor.go
index f09c55ca..111a294d 100644
--- a/pkg/util/sync/monitor.go
+++ b/pkg/util/sync/monitor.go
@@ -1,31 +1,39 @@
package sync
-import "sync"
+import (
+ "errors"
+ "sync"
+)
// Monitor is a convenience wrapper around
// starting a goroutine with a wait group,
// which can be used to wait for the
// goroutine to stop.
type Monitor struct {
- wg *sync.WaitGroup
+ wg *sync.WaitGroup
+ err error
}
-func RunMonitor(f func()) (m *Monitor) {
- m = &Monitor{
+func RunMonitor(f func() error) *Monitor {
+ m := &Monitor{
wg: new(sync.WaitGroup),
}
m.wg.Add(1)
go func() {
- f()
+ m.err = f()
m.wg.Done()
}()
- return
+ return m
}
-func (m *Monitor) Wait() {
- if m != nil {
- m.wg.Wait()
+func (m *Monitor) Wait() error {
+ // TODO: Do we need this check?
+ if m == nil {
+ return errors.New("Monitor: invalid null pointer to m")
}
+ // TODO: maybe this could be implemented more easily using just a channel?
+ m.wg.Wait()
+ return m.err
}
diff --git a/pkg/util/util.go b/pkg/util/util.go
deleted file mode 100644
index c80159c7..00000000
--- a/pkg/util/util.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package util
-
-import (
- "bytes"
- "crypto/rand"
- "encoding/hex"
- "fmt"
- "os/exec"
- "strings"
-)
-
-func ExecuteCommand(command string, args ...string) (string, error) {
- cmd := exec.Command(command, args...)
- out, err := cmd.CombinedOutput()
- if err != nil {
- return "", fmt.Errorf("command %q exited with %q: %v", cmd.Args, out, err)
- }
-
- return string(bytes.TrimSpace(out)), nil
-}
-
-func MatchPrefix(prefix string, fields ...string) ([]string, bool) {
- var prefixMatches, exactMatches []string
-
- for _, str := range fields {
- if str == prefix {
- exactMatches = append(exactMatches, str)
- } else if strings.HasPrefix(str, prefix) {
- prefixMatches = append(prefixMatches, str)
- }
- }
-
- // If we have exact matches, return them
- // and set the exact match boolean
- if len(exactMatches) > 0 {
- return exactMatches, true
- }
-
- return prefixMatches, false
-}
-
-func BoolPtr(b bool) *bool {
- return &b
-}
-
-// RandomSHA returns a hex-encoded string from {byteLen} random bytes.
-func RandomSHA(byteLen int) (string, error) {
- b := make([]byte, byteLen)
- _, err := rand.Read(b)
- if err != nil {
- return "", err
- }
- return hex.EncodeToString(b), nil
-}
diff --git a/pkg/util/watcher/dir_traversal.go b/pkg/util/watcher/dir_traversal.go
deleted file mode 100644
index 739ecf78..00000000
--- a/pkg/util/watcher/dir_traversal.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package watcher
-
-import (
- "os"
- "path/filepath"
- "strings"
-)
-
-func (w *FileWatcher) getFiles() ([]string, error) {
- return WalkDirectoryForFiles(w.dir, w.opts.ValidExtensions, w.opts.ExcludeDirs)
-}
-
-func (w *FileWatcher) validFile(path string) bool {
- return isValidFile(path, w.opts.ValidExtensions, w.opts.ExcludeDirs)
-}
-
-// WalkDirectoryForFiles discovers all subdirectories and
-// returns a list of valid files in them
-func WalkDirectoryForFiles(dir string, validExts, excludeDirs []string) (files []string, err error) {
- err = filepath.Walk(dir,
- func(path string, info os.FileInfo, err error) error {
- if err != nil {
- return err
- }
-
- if !info.IsDir() {
- // Only include valid files
- if isValidFile(path, validExts, excludeDirs) {
- files = append(files, path)
- }
- }
-
- return nil
- })
-
- return
-}
-
-// isValidFile is used to filter out all unsupported
-// files based on if their extension is unknown or
-// if their path contains an excluded directory
-func isValidFile(path string, validExts, excludeDirs []string) bool {
- parts := strings.Split(filepath.Clean(path), string(os.PathSeparator))
- ext := filepath.Ext(parts[len(parts)-1])
- for _, suffix := range validExts {
- if ext == suffix {
- return true
- }
- }
-
- for i := 0; i < len(parts)-1; i++ {
- for _, exclude := range excludeDirs {
- if parts[i] == exclude {
- return false
- }
- }
- }
-
- return false
-}
diff --git a/pkg/util/watcher/event.go b/pkg/util/watcher/event.go
deleted file mode 100644
index 4da933d7..00000000
--- a/pkg/util/watcher/event.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package watcher
-
-import (
- "fmt"
- "strings"
-)
-
-// FileEvent is an enum describing a change in a file's state
-type FileEvent byte
-
-const (
- FileEventNone FileEvent = iota // 0
- FileEventModify // 1
- FileEventDelete // 2
- FileEventMove // 3
-)
-
-func (e FileEvent) String() string {
- switch e {
- case 0:
- return "NONE"
- case 1:
- return "MODIFY"
- case 2:
- return "DELETE"
- case 3:
- return "MOVE"
- }
-
- return "UNKNOWN"
-}
-
-// FileEvents is a slice of FileEvents
-type FileEvents []FileEvent
-
-var _ fmt.Stringer = FileEvents{}
-
-func (e FileEvents) String() string {
- strs := make([]string, 0, len(e))
- for _, ev := range e {
- strs = append(strs, ev.String())
- }
-
- return strings.Join(strs, ",")
-}
-
-func (e FileEvents) Bytes() []byte {
- b := make([]byte, 0, len(e))
- for _, event := range e {
- b = append(b, byte(event))
- }
-
- return b
-}
-
-// FileUpdates is a slice of FileUpdate pointers
-type FileUpdates []*FileUpdate
-
-// FileUpdate is used by watchers to
-// signal the state change of a file.
-type FileUpdate struct {
- Event FileEvent
- Path string
-}
diff --git a/pkg/util/watcher/filewatcher.go b/pkg/util/watcher/filewatcher.go
deleted file mode 100644
index 67db3354..00000000
--- a/pkg/util/watcher/filewatcher.go
+++ /dev/null
@@ -1,354 +0,0 @@
-package watcher
-
-import (
- "fmt"
- "path"
- "time"
-
- "github.com/rjeczalik/notify"
- log "github.com/sirupsen/logrus"
- "github.com/weaveworks/libgitops/pkg/util/sync"
- "golang.org/x/sys/unix"
-)
-
-const eventBuffer = 4096 // How many events and updates we can buffer before watching is interrupted
-var listenEvents = []notify.Event{notify.InDelete, notify.InCloseWrite, notify.InMovedFrom, notify.InMovedTo}
-
-var eventMap = map[notify.Event]FileEvent{
- notify.InDelete: FileEventDelete,
- notify.InCloseWrite: FileEventModify,
-}
-
-// combinedEvent describes multiple events that should be concatenated into a single event
-type combinedEvent struct {
- input []notify.Event // input is a slice of events to match (in bytes, it speeds up the comparison)
- output int // output is the event's index that should be returned, negative values equal nil
-}
-
-func (c *combinedEvent) match(events notifyEvents) (notify.EventInfo, bool) {
- if len(c.input) > len(events) {
- return nil, false // Not enough events, cannot match
- }
-
- for i := 0; i < len(c.input); i++ {
- if events[i].Event() != c.input[i] {
- return nil, false
- }
- }
-
- if c.output > 0 {
- return events[c.output], true
- }
-
- return nil, true
-}
-
-// combinedEvents describes the event combinations to concatenate,
-// this is iterated in order, so the longest matches should be first
-var combinedEvents = []combinedEvent{
- // DELETE + MODIFY => MODIFY
- {[]notify.Event{notify.InDelete, notify.InCloseWrite}, 1},
- // MODIFY + DELETE => NONE
- {[]notify.Event{notify.InCloseWrite, notify.InDelete}, -1},
-}
-
-type notifyEvents []notify.EventInfo
-type eventStream chan notify.EventInfo
-type FileUpdateStream chan *FileUpdate
-
-// Options specifies options for the FileWatcher
-type Options struct {
- // ExcludeDirs specifies what directories to not watch
- ExcludeDirs []string
- // BatchTimeout specifies the duration to wait after last event before dispatching grouped inotify events
- BatchTimeout time.Duration
- // ValidExtensions specifies what file extensions to look at
- ValidExtensions []string
-}
-
-// DefaultOptions returns the default options
-func DefaultOptions() Options {
- return Options{
- ExcludeDirs: []string{".git"},
- BatchTimeout: 1 * time.Second,
- ValidExtensions: []string{".yaml", ".yml", ".json"},
- }
-}
-
-// NewFileWatcher returns a list of files in the watched directory in
-// addition to the generated FileWatcher, it can be used to populate
-// MappedRawStorage fileMappings
-func NewFileWatcher(dir string) (w *FileWatcher, files []string, err error) {
- return NewFileWatcherWithOptions(dir, DefaultOptions())
-}
-
-// NewFileWatcherWithOptions returns a list of files in the watched directory in
-// addition to the generated FileWatcher; it can be used to populate
-// MappedRawStorage fileMappings
-func NewFileWatcherWithOptions(dir string, opts Options) (w *FileWatcher, files []string, err error) {
- w = &FileWatcher{
- dir: dir,
- events: make(eventStream, eventBuffer),
- updates: make(FileUpdateStream, eventBuffer),
- batcher: sync.NewBatchWriter(opts.BatchTimeout),
- opts: opts,
- }
-
- log.Tracef("FileWatcher: Starting recursive watch for %q", dir)
- if err = notify.Watch(path.Join(dir, "..."), w.events, listenEvents...); err != nil {
- notify.Stop(w.events)
- } else if files, err = w.getFiles(); err == nil {
- w.monitor = sync.RunMonitor(w.monitorFunc)
- w.dispatcher = sync.RunMonitor(w.dispatchFunc)
- }
-
- return
-}
-
-// FileWatcher recursively monitors changes in files in the given directory
-// and sends out events based on their state changes. Only files conforming
-// to validSuffix are monitored. The FileWatcher can be suspended for a single
-// event at a time to eliminate updates by WatchStorage causing a loop.
-type FileWatcher struct {
- dir string
- events eventStream
- updates FileUpdateStream
- suspendEvent FileEvent
- monitor *sync.Monitor
- dispatcher *sync.Monitor
- opts Options
- // the batcher is used for properly sending many concurrent inotify events
- // as a group, after a specified timeout. This fixes the issue of one single
- // file operation being registered as many different inotify events
- batcher *sync.BatchWriter
-}
-
-func (w *FileWatcher) monitorFunc() {
- log.Debug("FileWatcher: Monitoring thread started")
- defer log.Debug("FileWatcher: Monitoring thread stopped")
- defer close(w.updates) // Close the update stream after the FileWatcher has stopped
-
- for {
- event, ok := <-w.events
- if !ok {
- return
- }
-
- if ievent(event).Mask&unix.IN_ISDIR != 0 {
- continue // Skip directories
- }
-
- if !w.validFile(event.Path()) {
- continue // Skip invalid files
- }
-
- updateEvent := convertEvent(event.Event())
- if w.suspendEvent > 0 && updateEvent == w.suspendEvent {
- w.suspendEvent = 0
- log.Debugf("FileWatcher: Skipping suspended event %s for path: %q", updateEvent, event.Path())
- continue // Skip the suspended event
- }
-
- // Get any events registered for the specific file, and append the specified event
- var eventList notifyEvents
- if val, ok := w.batcher.Load(event.Path()); ok {
- eventList = val.(notifyEvents)
- }
-
- eventList = append(eventList, event)
-
- // Register the event in the map, and dispatch all the events at once after the timeout
- w.batcher.Store(event.Path(), eventList)
- log.Debugf("FileWatcher: Registered inotify events %v for path %q", eventList, event.Path())
- }
-}
-
-func (w *FileWatcher) dispatchFunc() {
- log.Debug("FileWatcher: Dispatch thread started")
- defer log.Debug("FileWatcher: Dispatch thread stopped")
-
- for {
- // Wait until we have a batch dispatched to us
- ok := w.batcher.ProcessBatch(func(key, val interface{}) bool {
- // Concatenate all known events, and dispatch them to be handled one by one
- for _, event := range w.concatenateEvents(val.(notifyEvents)) {
- w.sendUpdate(event)
- }
-
- // Continue traversing the map
- return true
- })
- if !ok {
- return // The BatchWriter channel is closed, stop processing
- }
-
- log.Debug("FileWatcher: Dispatched events batch and reset the events cache")
- }
-}
-
-func (w *FileWatcher) sendUpdate(update *FileUpdate) {
- log.Debugf("FileWatcher: Sending update: %s -> %q", update.Event, update.Path)
- w.updates <- update
-}
-
-// GetFileUpdateStream gets the channel with FileUpdates
-func (w *FileWatcher) GetFileUpdateStream() FileUpdateStream {
- return w.updates
-}
-
-// Close closes active underlying resources
-func (w *FileWatcher) Close() {
- notify.Stop(w.events)
- w.batcher.Close()
- close(w.events) // Close the event stream
- w.monitor.Wait()
- w.dispatcher.Wait()
-}
-
-// Suspend enables a one-time suspension of the given event;
-// the FileWatcher will skip the given event once
-func (w *FileWatcher) Suspend(updateEvent FileEvent) {
- w.suspendEvent = updateEvent
-}
-
-func convertEvent(event notify.Event) FileEvent {
- if updateEvent, ok := eventMap[event]; ok {
- return updateEvent
- }
-
- return FileEventNone
-}
-
-func convertUpdate(event notify.EventInfo) *FileUpdate {
- fileEvent := convertEvent(event.Event())
- if fileEvent == FileEventNone {
- // This should never happen
- panic(fmt.Sprintf("invalid event for update conversion: %q", event.Event().String()))
- }
-
- return &FileUpdate{
- Event: fileEvent,
- Path: event.Path(),
- }
-}
-
-// moveCache caches an event during a move operation
-// and dispatches a FileUpdate if it's not cancelled
-type moveCache struct {
- watcher *FileWatcher
- event notify.EventInfo
- timer *time.Timer
-}
-
-func (w *FileWatcher) newMoveCache(event notify.EventInfo) *moveCache {
- m := &moveCache{
- watcher: w,
- event: event,
- }
-
- // moveCaches wait one second to be cancelled before firing
- m.timer = time.AfterFunc(time.Second, m.incomplete)
- return m
-}
-
-func (m *moveCache) cookie() uint32 {
- return ievent(m.event).Cookie
-}
-
-// If the moveCache isn't cancelled, the move is considered incomplete and this
-// method is fired. A complete move consists of a "from" event and a "to" event;
-// if only one is received, the file was moved into or out of a watched directory,
-// which is treated as a normal creation/deletion by this method.
-func (m *moveCache) incomplete() {
- var event FileEvent
-
- switch m.event.Event() {
- case notify.InMovedFrom:
- event = FileEventDelete
- case notify.InMovedTo:
- event = FileEventModify
- default:
- // This should never happen
- panic(fmt.Sprintf("moveCache: unrecognized event: %v", m.event.Event()))
- }
-
- log.Tracef("moveCache: Timer expired for %d, dispatching...", m.cookie())
- m.watcher.sendUpdate(&FileUpdate{event, m.event.Path()})
-
- // Delete the cache after the timer has fired
- delete(moveCaches, m.cookie())
-}
-
-func (m *moveCache) cancel() {
- m.timer.Stop()
- delete(moveCaches, m.cookie())
- log.Tracef("moveCache: Dispatching cancelled for %d", m.cookie())
-}
-
-// moveCaches keeps track of active moves by cookie
-var moveCaches = make(map[uint32]*moveCache)
-
-// move processes InMovedFrom and InMovedTo events in any order
-// and dispatches FileUpdates when a move is detected
-func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *FileUpdate) {
- cookie := ievent(event).Cookie
- cache, ok := moveCaches[cookie]
- if !ok {
- // The cookie is not cached, create a new cache object for it
- moveCaches[cookie] = w.newMoveCache(event)
- return
- }
-
- sourcePath, destPath := cache.event.Path(), event.Path()
- switch event.Event() {
- case notify.InMovedFrom:
- sourcePath, destPath = destPath, sourcePath
- fallthrough
- case notify.InMovedTo:
- cache.cancel() // Cancel dispatching the cache's incomplete move
- moveUpdate = &FileUpdate{FileEventMove, destPath} // Register an internal, complete move instead
- log.Tracef("FileWatcher: Detected move: %q -> %q", sourcePath, destPath)
- }
-
- return
-}
-
-// concatenateEvents takes a slice of events and concatenates them
-// where possible, based on combinedEvents. It also manages file
-// moves and the conversion from notifyEvents to FileUpdates
-func (w *FileWatcher) concatenateEvents(events notifyEvents) FileUpdates {
- for _, combinedEvent := range combinedEvents {
- // Test if the prefix of the given events matches combinedEvent.input
- if event, ok := combinedEvent.match(events); ok {
- // If so, replace combinedEvent.input prefix in events with combinedEvent.output and recurse
- concatenated := events[len(combinedEvent.input):]
- if event != nil { // Prepend the concatenation result event if any
- concatenated = append(notifyEvents{event}, concatenated...)
- }
-
- log.Tracef("FileWatcher: Concatenated events: %v -> %v", events, concatenated)
- return w.concatenateEvents(concatenated)
- }
- }
-
- // Convert the events to updates
- updates := make(FileUpdates, 0, len(events))
- for _, event := range events {
- switch event.Event() {
- case notify.InMovedFrom, notify.InMovedTo:
- // Send move-related events to w.move
- if update := w.move(event); update != nil {
- // Add the update to the list if we get something back
- updates = append(updates, update)
- }
- default:
- updates = append(updates, convertUpdate(event))
- }
- }
-
- return updates
-}
-
-func ievent(event notify.EventInfo) *unix.InotifyEvent {
- return event.Sys().(*unix.InotifyEvent)
-}
diff --git a/sanitation.md b/sanitation.md
new file mode 100644
index 00000000..05ad037f
--- /dev/null
+++ b/sanitation.md
@@ -0,0 +1,140 @@
+# Frame Sanitation
+
+The frame sanitation package that lives in `github.com/weaveworks/libgitops/pkg/frame/sanitation` takes care of formatting frames in a user-configurable and content-type-specific way.
+
+This is useful, for example, when one would like to standardize the formatting of YAML and/or JSON in a Git repository.
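+As a rough sketch, usage could look something like the following. Note that the exact function names and signatures below are assumptions based on the examples in this document, not the package's verified API:
+
+```go
+import "github.com/weaveworks/libgitops/pkg/frame/sanitation"
+
+// sanitizeFrame is a hypothetical helper: "original" is the frame as stored
+// in Git, "current" is the re-marshalled frame after a programmatic update.
+func sanitizeFrame(original, current []byte) ([]byte, error) {
+    s := sanitation.NewJSONYAML()
+    // Sanitize (assumed signature) formats "current" while preserving the
+    // formatting and comments of "original".
+    return s.Sanitize(original, current)
+}
+```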
+
+## Goals
+
+- Provide a content-type-specific way to set a "default" formatting (similar in purpose to `gofmt` and `rustfmt`)
+- Minimize textual diffs when updating an object (e.g. writing back to git)
+- Allow the user to specifically choose formatting options like spacing, field ordering
+- Allow retaining auxiliary metadata in the frame, e.g. YAML comments
+
+## Default implementations
+
+- `sanitation.NewJSONYAML()` supports JSON and YAML with the following options:
+ - TODO
+
+## Examples
+
+### Minimizing YAML diffs
+
+Take this valid, but messy YAML file as an example of what a user might store in Git:
+
+"YAML File A":
+
+```yaml
+---
+# root
+
+apiVersion: sample.com/v1 # bla
+# hello
+items:
+# moveup
+ - item1 # hello
+ # bla
+ - item2 # hi
+
+kind: MyList # foo
+
+```
+
+Say that you want to append an `item3` string to the `items` list. You do a `yaml.Unmarshal`, append the item, and `yaml.Marshal` the result using your favorite library, and this is what you'll get:
+
+"YAML File B":
+
+```yaml
+apiVersion: sample.com/v1
+items:
+- item1
+- item2
+- item3
+kind: MyList
+```
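+For reference, a minimal, runnable sketch of that round-trip using [gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3) is shown below; the exact key order and indentation of the output vary by library:
+
+```go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    "gopkg.in/yaml.v3"
+)
+
+const fileA = `---
+# root
+
+apiVersion: sample.com/v1 # bla
+# hello
+items:
+# moveup
+  - item1 # hello
+  # bla
+  - item2 # hi
+
+kind: MyList # foo
+`
+
+func main() {
+    // Unmarshalling into a generic map drops all comments and formatting.
+    var doc map[string]interface{}
+    if err := yaml.Unmarshal([]byte(fileA), &doc); err != nil {
+        log.Fatal(err)
+    }
+
+    // Append the new item to the list.
+    doc["items"] = append(doc["items"].([]interface{}), "item3")
+
+    // Marshalling back produces semantically correct, but restructured, YAML.
+    out, err := yaml.Marshal(doc)
+    if err != nil {
+        log.Fatal(err)
+    }
+    fmt.Print(string(out))
+}
+```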
+
+That's nice and all; it's semantically the right content. However, it has lost all the structure of the original YAML document, and the diff is huge and hard to understand:
+
+```diff
+--- Expected
++++ Actual
+@@ -1,13 +1,7 @@
+----
+-# root
++apiVersion: sample.com/v1
++items:
++- item1
++- item2
++- item3
++kind: MyList
+-apiVersion: sample.com/v1 # bla
+-# hello
+-items:
+-# moveup
+- - item1 # hello
+- # bla
+- - item2 # hi
+-
+-kind: MyList # foo
+-
+```
+
+However, if the user calls `sanitation.Sanitize`, giving "YAML File A" as the "original" document and "YAML File B" as the "current" one, the JSON/YAML sanitizer will merge them as follows:
+
+```yaml
+# root
+apiVersion: sample.com/v1 # bla
+# hello
+items:
+ # moveup
+ - item1 # hello
+ # bla
+ - item2 # hi
+ - item3
+kind: MyList # foo
+```
+
+With the diff:
+
+```diff
+--- Expected
++++ Actual
+@@ -1,4 +1,2 @@
+----
+ # root
+-
+ apiVersion: sample.com/v1 # bla
+@@ -6,7 +4,7 @@
+ items:
+-# moveup
++ # moveup
+ - item1 # hello
+- # bla
++ # bla
+ - item2 # hi
+-
++ - item3
+ kind: MyList # foo
+```
+
+Quite a difference! We can see that:
+
+- Comments from the original document are preserved
+  - This is achieved by walking the YAML nodes of the "original" and the "current" documents in parallel. Whenever a comment is found in the "original" document, it is copied over to the "current" one (see the sketch after this list).
+- Comments are now aligned with the default indentation at that context
+  - Per the [YAML 1.2 spec](https://yaml.org/spec/1.2/spec.html#id2767100), "comments are not associated with a particular node".
+  - In practice, though, [gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3) (and by extension, kyaml) **does attach** comments to YAML nodes. Arguably, this is also what users expect.
+  - Hence, when sanitizing this document, all comments are lined up at the same indentation as their context.
+- The unnecessary `---` separator has been removed
+  - Frame separators should not be part of the frame
+  - Framing is handled by the [framer](framing.md)
+- The list indentation is preserved
+  - That is, the list items of `items` are indented as in the original File A, not as in the current File B
+- Unnecessary newlines are removed
+
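+To make the comment-preservation step concrete, here is a minimal sketch of such a node walk using [gopkg.in/yaml.v3](https://pkg.go.dev/gopkg.in/yaml.v3). This illustrates the approach, it is not the actual implementation; among other things, a real implementation must match mapping keys by name rather than by position:
+
+```go
+import "gopkg.in/yaml.v3"
+
+// copyComments recursively copies comments from the "original" node tree onto
+// the matching nodes of the "current" tree, so that re-marshalling "current"
+// retains the user's comments.
+func copyComments(original, current *yaml.Node) {
+    if original == nil || current == nil {
+        return
+    }
+    if current.HeadComment == "" {
+        current.HeadComment = original.HeadComment
+    }
+    if current.LineComment == "" {
+        current.LineComment = original.LineComment
+    }
+    if current.FootComment == "" {
+        current.FootComment = original.FootComment
+    }
+    // Walk the children pairwise; this naive positional matching breaks down
+    // when fields are reordered or inserted mid-document.
+    for i := 0; i < len(original.Content) && i < len(current.Content); i++ {
+        copyComments(original.Content[i], current.Content[i])
+    }
+}
+```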
+TODO: Investigate what happens to comments when you prepend an item to a list.
+TODO: Show that it trims whitespace, e.g. `kind : Foo` becomes `kind: Foo`
+TODO: Show that it removes empty fields from the YAML, e.g. `status: {}` or `creationTimestamp: null`
+TODO: Share the context on why the above actually show up in YAML in the first place.