From 6e1bed1b3c2f050a751ef3e9f292bc94ee1f8429 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 28 Dec 2020 21:58:03 +0200 Subject: [PATCH 001/149] Guard against nil in GVKForObject --- pkg/serializer/serializer.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index eb798c91..8bf691d2 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -244,6 +244,10 @@ func prioritizedVersionForGroup(scheme *runtime.Scheme, groupName string) (schem } func GVKForObject(scheme *runtime.Scheme, obj runtime.Object) (schema.GroupVersionKind, error) { + // Safety check: one should not do this + if obj == nil || obj.GetObjectKind() == nil { + return schema.GroupVersionKind{}, fmt.Errorf("GVKForObject: obj or obj.GetObjectKind() must not be nil") + } // If we already have TypeMeta filled in here, just use it // TODO: This is probably not needed gvk := obj.GetObjectKind().GroupVersionKind() From 35ed90d63f8f703676a5dfd7ad18a495f3a88e71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 28 Dec 2020 22:16:59 +0200 Subject: [PATCH 002/149] Handle the case where the kind directory does not exist when listing --- pkg/storage/rawstorage.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/pkg/storage/rawstorage.go b/pkg/storage/rawstorage.go index 93304332..0533595d 100644 --- a/pkg/storage/rawstorage.go +++ b/pkg/storage/rawstorage.go @@ -146,7 +146,16 @@ func (r *GenericRawStorage) List(kind KindKey) ([]ObjectKey, error) { return nil, err } - entries, err := ioutil.ReadDir(r.kindKeyPath(kind)) + // If the expected directory does not exist, just return an empty (nil) slice + dir := r.kindKeyPath(kind) + if ok, fi := util.PathExists(dir); !ok { + return nil, nil + } else if !fi.IsDir() { + return nil, fmt.Errorf("expected that %s is a directory", dir) + } + + // When we know that path is a directory, go ahead 
and read it + entries, err := ioutil.ReadDir(dir) if err != nil { return nil, err } From 69e47d6580cda2f5c59433a70c975eaaebeb1c69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 28 Dec 2020 22:27:36 +0200 Subject: [PATCH 003/149] Handle the case in Storage.Patch where the underlying RawStorage returns YAML --- pkg/storage/storage.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 4d942324..6e75c047 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -14,6 +14,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/yaml" ) var ( @@ -204,6 +205,16 @@ func (s *GenericStorage) Patch(key ObjectKey, patch []byte) error { return err } + // TODO: This is a bit of a hack, but for now this works. The patcher expects only JSON, hence + // we need to handle the case when raw.Read doesn't return JSON bytes. In the future however, this + // logic should probably be rewritten completely. + if s.raw.ContentType(key) == serializer.ContentTypeYAML { + oldContent, err = yaml.YAMLToJSONStrict(oldContent) + if err != nil { + return err + } + } + newContent, err := s.patcher.Apply(oldContent, patch, key.GetGVK()) if err != nil { return err From 2693d1d5aa58c9d6c8b512b0cc123d3e0473ace9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Jan 2021 21:35:12 +0200 Subject: [PATCH 004/149] Add a labels filter, split name & namespace to separate filters, implement controller-runtime-style options, and relevant opt interfaces for interoperability. 
--- pkg/filter/interfaces.go | 52 ++++++++------------------------ pkg/filter/labels.go | 46 ++++++++++++++++++++++++++++ pkg/filter/name.go | 41 +++++++++++++------------ pkg/filter/namespace.go | 45 ++++++++++++++++++++++++++++ pkg/filter/options.go | 65 +++++++++++++++++++++++++++++----------- pkg/filter/uid.go | 37 ++++++++++++----------- 6 files changed, 189 insertions(+), 97 deletions(-) create mode 100644 pkg/filter/labels.go create mode 100644 pkg/filter/namespace.go diff --git a/pkg/filter/interfaces.go b/pkg/filter/interfaces.go index 62d3cd3f..a097112b 100644 --- a/pkg/filter/interfaces.go +++ b/pkg/filter/interfaces.go @@ -1,48 +1,20 @@ package filter -import "github.com/weaveworks/libgitops/pkg/runtime" +import ( + "errors" -// ListFilter is an interface for pipe-like list filtering behavior. -type ListFilter interface { - // Filter walks through all objects in obj, assesses whether the object - // matches the filter parameters, and conditionally adds it to the return - // slice or not. This method can be thought of like an UNIX pipe. - Filter(objs ...runtime.Object) ([]runtime.Object, error) -} + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var ( + // ErrInvalidFilterParams describes an error where invalid parameters were given + // to a filter. + ErrInvalidFilterParams = errors.New("invalid parameters given to filter") +) // ObjectFilter is an interface for filtering objects one-by-one. type ObjectFilter interface { - // Filter takes in one object (at once, per invocation), and returns a + // Match takes in one object (at once, per invocation), and returns a // boolean whether the object matches the filter parameters, or not. - Filter(obj runtime.Object) (bool, error) -} - -// ObjectToListFilter transforms an ObjectFilter into a ListFilter. If of is nil, -// this function panics. 
-func ObjectToListFilter(of ObjectFilter) ListFilter { - if of == nil { - panic("programmer error: of ObjectFilter must not be nil in ObjectToListFilter") - } - return &objectToListFilter{of} -} - -type objectToListFilter struct { - of ObjectFilter -} - -// Filter implements ListFilter, but uses an ObjectFilter for the underlying logic. -func (f objectToListFilter) Filter(objs ...runtime.Object) (retarr []runtime.Object, err error) { - // Walk through all objects - for _, obj := range objs { - // Match them one-by-one against the ObjectFilter - match, err := f.of.Filter(obj) - if err != nil { - return nil, err - } - // If the object matches, include it in the return array - if match { - retarr = append(retarr, obj) - } - } - return + Match(obj client.Object) (bool, error) } diff --git a/pkg/filter/labels.go b/pkg/filter/labels.go new file mode 100644 index 00000000..24ef9f10 --- /dev/null +++ b/pkg/filter/labels.go @@ -0,0 +1,46 @@ +package filter + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// LabelsFilter implements ObjectFilter and FilterOption. +// It also implements client.{List,DeleteAllOf}Option so +// it can be passed into client.Client.{List,DeleteAllOf} +// as a way to conveniently filter those lists. +var _ ObjectFilter = LabelsFilter{} +var _ FilterOption = LabelsFilter{} +var _ client.ListOption = LabelsFilter{} +var _ client.DeleteAllOfOption = LabelsFilter{} + +// LabelsFilter is an ObjectFilter that compares metav1.Object.GetLabels() +// to the LabelSelector field. +type LabelsFilter struct { + // LabelSelector filters results by label. Use SetLabelSelector to + // set from raw string form. + // +required + LabelSelector labels.Selector +} + +// Match implements ObjectFilter +func (f LabelsFilter) Match(obj client.Object) (bool, error) { + // Require f.Namespace to always be set. 
+ if f.LabelSelector == nil { + return false, fmt.Errorf("the LabelsFilter.LabelSelector field must not be nil: %w", ErrInvalidFilterParams) + } + + return f.LabelSelector.Matches(labels.Set(obj.GetLabels())), nil +} + +// ApplyToList implements client.ListOption, but is just a "dummy" implementation in order to implement +// the interface, so that this struct can be passed to client.Reader.List() +func (f LabelsFilter) ApplyToList(_ *client.ListOptions) {} +func (f LabelsFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {} + +// ApplyToFilterOptions implements FilterOption +func (f LabelsFilter) ApplyToFilterOptions(target *FilterOptions) { + target.ObjectFilters = append(target.ObjectFilters, f) +} diff --git a/pkg/filter/name.go b/pkg/filter/name.go index 42e516cd..ade3d995 100644 --- a/pkg/filter/name.go +++ b/pkg/filter/name.go @@ -4,40 +4,36 @@ import ( "fmt" "strings" - "github.com/weaveworks/libgitops/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" ) -// NameFilter implements ObjectFilter and ListOption. +// NameFilter implements ObjectFilter and FilterOption. +// It also implements client.{List,DeleteAllOf}Option so +// it can be passed into client.Client.{List,DeleteAllOf} +// as a way to conveniently filter those lists. var _ ObjectFilter = NameFilter{} -var _ ListOption = NameFilter{} +var _ FilterOption = NameFilter{} +var _ client.ListOption = NameFilter{} +var _ client.DeleteAllOfOption = NameFilter{} -// NameFilter is an ObjectFilter that compares runtime.Object.GetName() +// NameFilter is an ObjectFilter that compares Object.GetName() // to the Name field by either equality or prefix. type NameFilter struct { // Name matches the object by .metadata.name. // +required Name string - // Namespace matches the object by .metadata.namespace. If left as - // an empty string, it is ignored when filtering. - // +optional - Namespace string - // MatchPrefix whether the name (not namespace) matching should be exact, or prefix-based. 
+ // MatchPrefix whether the name matching should be exact, or prefix-based. // +optional MatchPrefix bool } -// Filter implements ObjectFilter -func (f NameFilter) Filter(obj runtime.Object) (bool, error) { +// Match implements ObjectFilter +func (f NameFilter) Match(obj client.Object) (bool, error) { // Require f.Name to always be set. if len(f.Name) == 0 { return false, fmt.Errorf("the NameFilter.Name field must not be empty: %w", ErrInvalidFilterParams) } - // If f.Namespace is set, and it does not match the object, return false - if len(f.Namespace) > 0 && f.Namespace != obj.GetNamespace() { - return false, nil - } - // If the Name should be matched by the prefix, use strings.HasPrefix if f.MatchPrefix { return strings.HasPrefix(obj.GetName(), f.Name), nil @@ -46,9 +42,12 @@ func (f NameFilter) Filter(obj runtime.Object) (bool, error) { return f.Name == obj.GetName(), nil } -// ApplyToListOptions implements ListOption, and adds itself converted to -// a ListFilter to ListOptions.Filters. -func (f NameFilter) ApplyToListOptions(target *ListOptions) error { - target.Filters = append(target.Filters, ObjectToListFilter(f)) - return nil +// ApplyToList implements client.ListOption, but is just a "dummy" implementation in order to implement +// the interface, so that this struct can be passed to client.Reader.List() +func (f NameFilter) ApplyToList(_ *client.ListOptions) {} +func (f NameFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {} + +// ApplyToFilterOptions implements FilterOption +func (f NameFilter) ApplyToFilterOptions(target *FilterOptions) { + target.ObjectFilters = append(target.ObjectFilters, f) } diff --git a/pkg/filter/namespace.go b/pkg/filter/namespace.go new file mode 100644 index 00000000..ae1c8842 --- /dev/null +++ b/pkg/filter/namespace.go @@ -0,0 +1,45 @@ +package filter + +import ( + "fmt" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// NamespaceFilter implements ObjectFilter and FilterOption. 
+// It also implements client.{List,DeleteAllOf}Option so +// it can be passed into client.Client.{List,DeleteAllOf} +// as a way to conveniently filter those lists. +var _ ObjectFilter = NamespaceFilter{} +var _ FilterOption = NamespaceFilter{} +var _ client.ListOption = NamespaceFilter{} +var _ client.DeleteAllOfOption = NamespaceFilter{} + +// NamespaceFilter is an ObjectFilter that compares Object.GetNamespace() +// to the Namespace field. +type NamespaceFilter struct { + // Namespace matches the object by .metadata.namespace. If left as + // an empty string, it is ignored when filtering. + // +required + Namespace string +} + +// Match implements ObjectFilter +func (f NamespaceFilter) Match(obj client.Object) (bool, error) { + // Require f.Namespace to always be set. + if len(f.Namespace) == 0 { + return false, fmt.Errorf("the NamespaceFilter.Namespace field must not be empty: %w", ErrInvalidFilterParams) + } + // Otherwise, just use an equality check + return f.Namespace == obj.GetNamespace(), nil +} + +// ApplyToList implements client.ListOption, but is just a "dummy" implementation in order to implement +// the interface, so that this struct can be passed to client.Reader.List() +func (f NamespaceFilter) ApplyToList(_ *client.ListOptions) {} +func (f NamespaceFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {} + +// ApplyToFilterOptions implements FilterOption +func (f NamespaceFilter) ApplyToFilterOptions(target *FilterOptions) { + target.ObjectFilters = append(target.ObjectFilters, f) +} diff --git a/pkg/filter/options.go b/pkg/filter/options.go index 4a831dda..6608da30 100644 --- a/pkg/filter/options.go +++ b/pkg/filter/options.go @@ -1,27 +1,56 @@ package filter -// ListOptions is a generic struct for listing options. -type ListOptions struct { - // Filters contains a chain of ListFilters, which will be processed in order and pipe the - // available objects through before returning. 
- Filters []ListFilter +import "sigs.k8s.io/controller-runtime/pkg/client" + +// FilterOption is an interface for implementations that know how to +// mutate FilterOptions. +type FilterOption interface { + // ApplyToFilterOptions applies the configuration of the current object into a target FilterOptions struct. + ApplyToFilterOptions(target *FilterOptions) } -// ListOption is an interface which can be passed into e.g. List() methods as a variadic-length -// argument list. -type ListOption interface { - // ApplyToListOptions applies the configuration of the current object into a target ListOptions struct. - ApplyToListOptions(target *ListOptions) error +// FilterOptions is a set of options for filtering. It implements the ObjectFilter interface +// itself, so it can be used kind of as a multi-ObjectFilter. +type FilterOptions struct { + // ObjectFilters contains a set of filters for a single object. All of the filters must return + // true an a nil error for Match(obj) to return (true, nil). + ObjectFilters []ObjectFilter } -// MakeListOptions makes a completed ListOptions struct from a list of ListOption implementations. -func MakeListOptions(opts ...ListOption) (*ListOptions, error) { - o := &ListOptions{} - for _, opt := range opts { - // For every option, apply it into o, and check if there's an error - if err := opt.ApplyToListOptions(o); err != nil { - return nil, err +// Match matches the object against all the ObjectFilters. +func (o *FilterOptions) Match(obj client.Object) (bool, error) { + for _, filter := range o.ObjectFilters { + matched, err := filter.Match(obj) + if err != nil { + return false, err + } + if !matched { + return false, nil } } - return o, nil + return true, nil +} + +// ApplyToFilterOptions implements FilterOption +func (o *FilterOptions) ApplyToFilterOptions(target *FilterOptions) { + target.ObjectFilters = append(target.ObjectFilters, o.ObjectFilters...) 
+} + +// ApplyOptions applies the given FilterOptions to itself and returns itself. +func (o *FilterOptions) ApplyOptions(opts []FilterOption) *FilterOptions { + for _, opt := range opts { + opt.ApplyToFilterOptions(o) + } + return o +} + +// ApplyOption applies one option that aims to implement FilterOption, +// but at compile-time maybe does not for sure. This can be used for +// lists of other Options that possibly implement FilterOption in the +// following way: for _, opt := range opts { filterOpts.ApplyOption(opt) } +func (o *FilterOptions) ApplyOption(opt interface{}) *FilterOptions { + if fOpt, ok := opt.(FilterOption); ok { + fOpt.ApplyToFilterOptions(o) + } + return o } diff --git a/pkg/filter/uid.go b/pkg/filter/uid.go index eea48ffd..1aedab3f 100644 --- a/pkg/filter/uid.go +++ b/pkg/filter/uid.go @@ -1,25 +1,23 @@ package filter import ( - "errors" "fmt" "strings" - "github.com/weaveworks/libgitops/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) -var ( - // ErrInvalidFilterParams describes an error where invalid parameters were given - // to a filter. - ErrInvalidFilterParams = errors.New("invalid parameters given to filter") -) - -// UIDFilter implements ObjectFilter and ListOption. +// UIDFilter implements ObjectFilter and FilterOption. +// It also implements client.{List,DeleteAllOf}Option so +// it can be passed into client.Client.{List,DeleteAllOf} +// as a way to conveniently filter those lists. var _ ObjectFilter = UIDFilter{} -var _ ListOption = UIDFilter{} +var _ FilterOption = UIDFilter{} +var _ client.ListOption = UIDFilter{} +var _ client.DeleteAllOfOption = UIDFilter{} -// UIDFilter is an ObjectFilter that compares runtime.Object.GetUID() to +// UIDFilter is an ObjectFilter that compares Object.GetUID() to // the UID field by either equality or prefix. The UID field is required, // otherwise ErrInvalidFilterParams is returned. 
type UIDFilter struct { @@ -31,8 +29,8 @@ type UIDFilter struct { MatchPrefix bool } -// Filter implements ObjectFilter -func (f UIDFilter) Filter(obj runtime.Object) (bool, error) { +// Match implements ObjectFilter +func (f UIDFilter) Match(obj client.Object) (bool, error) { // Require f.UID to always be set. if len(f.UID) == 0 { return false, fmt.Errorf("the UIDFilter.UID field must not be empty: %w", ErrInvalidFilterParams) @@ -45,9 +43,12 @@ func (f UIDFilter) Filter(obj runtime.Object) (bool, error) { return f.UID == obj.GetUID(), nil } -// ApplyToListOptions implements ListOption, and adds itself converted to -// a ListFilter to ListOptions.Filters. -func (f UIDFilter) ApplyToListOptions(target *ListOptions) error { - target.Filters = append(target.Filters, ObjectToListFilter(f)) - return nil +// ApplyToList implements client.ListOption, but is just a "dummy" implementation in order to implement +// the interface, so that this struct can be passed to client.Reader.List() +func (f UIDFilter) ApplyToList(_ *client.ListOptions) {} +func (f UIDFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {} + +// ApplyToFilterOptions implements FilterOption +func (f UIDFilter) ApplyToFilterOptions(target *FilterOptions) { + target.ObjectFilters = append(target.ObjectFilters, f) } From 9c060fba00045718022236982d199f482d431f3d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Jan 2021 21:38:15 +0200 Subject: [PATCH 005/149] Make pretty encoding catch also cases when the target object implements MarshalJSON(), by always first encoding "non-pretty", and indenting the JSON a configurable amount of spaces after encoding, but before handing the data to the framewriter. 
--- pkg/serializer/encode.go | 72 ++++++++++++++++++++++++++++++---------- 1 file changed, 54 insertions(+), 18 deletions(-) diff --git a/pkg/serializer/encode.go b/pkg/serializer/encode.go index 77061932..50badaab 100644 --- a/pkg/serializer/encode.go +++ b/pkg/serializer/encode.go @@ -1,16 +1,20 @@ package serializer import ( - "github.com/sirupsen/logrus" + "bytes" + "encoding/json" + "strings" + "github.com/weaveworks/libgitops/pkg/util" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) type EncodingOptions struct { - // Use pretty printing when writing to the output. (Default: true) - // TODO: Fix that sometimes omitempty fields aren't respected - Pretty *bool + // Indent JSON encoding output with this many spaces. (Default: nil, means no indentation) + // Only applicable to ContentTypeJSON framers. + // TODO: Make this a property of the FrameWriter instead? + JSONIndent *int // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta. // Only applicable to ContentTypeYAML framers. // Using any other framer will be silently ignored. 
Usage of this option also requires setting @@ -24,8 +28,18 @@ type EncodingOptions struct { type EncodingOptionsFunc func(*EncodingOptions) func WithPrettyEncode(pretty bool) EncodingOptionsFunc { + if pretty { + return WithJSONIndent(2) + } return func(opts *EncodingOptions) { - opts.Pretty = &pretty + // disable the indenting + opts.JSONIndent = nil + } +} + +func WithJSONIndent(spaces int) EncodingOptionsFunc { + return func(opts *EncodingOptions) { + opts.JSONIndent = &spaces } } @@ -44,7 +58,7 @@ func WithEncodingOptions(newOpts EncodingOptions) EncodingOptionsFunc { func defaultEncodeOpts() *EncodingOptions { return &EncodingOptions{ - Pretty: util.BoolPtr(true), + JSONIndent: util.IntPtr(2), // Default to "pretty encoding" PreserveComments: util.BoolPtr(false), } } @@ -75,6 +89,7 @@ func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodingOptions) Encoder { // internal object given to the preferred external groupversion. No conversion will happen // if the given object is of an external version. // TODO: This should automatically convert to the preferred version +// TODO: Fix that sometimes omitempty fields aren't respected func (e *encoder) Encode(fw FrameWriter, objs ...runtime.Object) error { for _, obj := range objs { // Get the kind for the given object @@ -110,23 +125,23 @@ func (e *encoder) EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv s return ErrUnsupportedContentType } - // Choose the pretty or non-pretty one + // Choose the default, non-pretty serializer, as we prettify if needed later + // We technically could use the JSON PrettySerializer here, but it does not catch the + // cases where the JSON iterator invokes MarshalJSON() on an object, and that object + // returns non-pretty bytes (e.g. *unstructured.Unstructured). Hence, it is more robust + // and extensible to always use the non-pretty serializer, and only on request indent + // a given number of spaces after JSON encoding. 
encoder := serializerInfo.Serializer - // Use the pretty serializer if it was asked for and is defined for the content type - if *e.opts.Pretty { - // Apparently not all SerializerInfos have this field defined (e.g. YAML) - // TODO: This could be considered a bug in upstream, create an issue - if serializerInfo.PrettySerializer != nil { - encoder = serializerInfo.PrettySerializer - } else { - logrus.Debugf("PrettySerializer for ContentType %s is nil, falling back to Serializer.", fw.ContentType()) - } - } - // Get a version-specific encoder for the specified groupversion versionEncoder := encoderForVersion(e.scheme, encoder, gv) + // Check if the user requested prettified JSON output. + // If the ContentType is JSON this is ok, we will intent the encode output on the fly. + if e.opts.JSONIndent != nil && fw.ContentType() == ContentTypeJSON { + fw = &jsonPrettyFrameWriter{indent: *e.opts.JSONIndent, fw: fw} + } + // Cast the object to a metav1.Object to get access to annotations metaobj, ok := toMetaObject(obj) // For objects without ObjectMeta, the cast will fail. 
Allow that failure and do "normal" encoding @@ -150,3 +165,24 @@ func encoderForVersion(scheme *runtime.Scheme, encoder runtime.Encoder, gv schem true, // convert if needed before encode ) } + +type jsonPrettyFrameWriter struct { + indent int + fw FrameWriter +} + +func (w *jsonPrettyFrameWriter) Write(p []byte) (n int, err error) { + // Indent the source bytes + var indented bytes.Buffer + err = json.Indent(&indented, p, "", strings.Repeat(" ", w.indent)) + if err != nil { + return + } + // Write the pretty bytes to the underlying writer + n, err = w.fw.Write(indented.Bytes()) + return +} + +func (w *jsonPrettyFrameWriter) ContentType() ContentType { + return w.fw.ContentType() +} From b644f73a6a03c617078e57a16cf591ccbc670005 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Jan 2021 21:40:57 +0200 Subject: [PATCH 006/149] Make FrameReader thread-safe, and implement a Single-Frame FrameReader --- pkg/serializer/frame_reader.go | 55 ++++++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 3 deletions(-) diff --git a/pkg/serializer/frame_reader.go b/pkg/serializer/frame_reader.go index 26ead8d2..aee975fe 100644 --- a/pkg/serializer/frame_reader.go +++ b/pkg/serializer/frame_reader.go @@ -6,6 +6,7 @@ import ( "io" "io/ioutil" "os" + "sync" "k8s.io/apimachinery/pkg/runtime/serializer/json" ) @@ -71,6 +72,7 @@ func NewJSONFrameReader(rc ReadCloser) FrameReader { func newFrameReader(rc io.ReadCloser, contentType ContentType) *frameReader { return &frameReader{ rc: rc, + rcMu: &sync.Mutex{}, bufSize: defaultBufSize, maxFrameSize: defaultMaxFrameSize, contentType: contentType, @@ -79,12 +81,13 @@ func newFrameReader(rc io.ReadCloser, contentType ContentType) *frameReader { // frameReader is a FrameReader implementation type frameReader struct { - rc io.ReadCloser + // the underlying readcloser and the mutex that guards it + rc io.ReadCloser + rcMu *sync.Mutex + bufSize int maxFrameSize int contentType ContentType - - // TODO: 
Maybe add mutexes for thread-safety (so no two goroutines read at the same time) } // ReadFrame reads one frame from the underlying io.Reader. ReadFrame @@ -93,6 +96,10 @@ type frameReader struct { // ReadFrame keeps on reading using new calls. ReadFrame might return both data and // io.EOF. io.EOF will be returned in the final call. func (rf *frameReader) ReadFrame() (frame []byte, err error) { + // Only one actor can read at a time + rf.rcMu.Lock() + defer rf.rcMu.Unlock() + // Temporary buffer to parts of a frame into var buf []byte // How many bytes were read by the read call @@ -149,6 +156,10 @@ func (rf *frameReader) ContentType() ContentType { // Close implements io.Closer and closes the underlying ReadCloser func (rf *frameReader) Close() error { + // Only one actor can access rf.rc at a time + rf.rcMu.Lock() + defer rf.rcMu.Unlock() + return rf.rc.Close() } @@ -166,3 +177,41 @@ func FromFile(filePath string) ReadCloser { func FromBytes(content []byte) ReadCloser { return ioutil.NopCloser(bytes.NewReader(content)) } + +// NewSingleFrameReader returns a FrameReader for only a single frame of +// the specified content type. This avoids overhead if it is known that the +// byte array only contains one frame. The given frame is returned in +// whole in the first ReadFrame() call, and io.EOF is returned in all future +// invocations. +func NewSingleFrameReader(b []byte, ct ContentType) FrameReader { + return &singleFrameReader{ + ct: ct, + b: b, + hasBeenRead: false, + hasBeenReadMu: &sync.Mutex{}, + } +} + +var _ FrameReader = &singleFrameReader{} + +type singleFrameReader struct { + ct ContentType + b []byte + hasBeenRead bool + hasBeenReadMu *sync.Mutex +} + +func (r *singleFrameReader) ReadFrame() ([]byte, error) { + r.hasBeenReadMu.Lock() + defer r.hasBeenReadMu.Unlock() + // If ReadFrame() has been called once, just return io.EOF. 
+ if r.hasBeenRead { + return nil, io.EOF + } + // The first time, mark that we've read, and return the single frame + r.hasBeenRead = true + return r.b, nil +} + +func (r *singleFrameReader) ContentType() ContentType { return r.ct } +func (r *singleFrameReader) Close() error { return nil } From 6d64cff15ef67a11234edec14bcf32fa45f0ad77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Jan 2021 22:08:06 +0200 Subject: [PATCH 007/149] Handle updated kyaml in tests --- pkg/serializer/comments_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/serializer/comments_test.go b/pkg/serializer/comments_test.go index 8f4c65c2..6332e5ca 100644 --- a/pkg/serializer/comments_test.go +++ b/pkg/serializer/comments_test.go @@ -18,8 +18,8 @@ kind: Test spec: # Head comment data: - - field # Inline comment - - another + - field # Inline comment + - another thing: # Head comment var: true @@ -29,9 +29,9 @@ const sampleData2 = `kind: Test spec: # Head comment data: - - field # Inline comment - - another: - subthing: "yes" + - field # Inline comment + - another: + subthing: "yes" thing: # Head comment var: true From 29e4ca64efe49410882f57dcf363215adc628e49 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Jan 2021 22:08:40 +0200 Subject: [PATCH 008/149] Always add the mutex when creating the framewriter in test --- pkg/serializer/frame_reader_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/serializer/frame_reader_test.go b/pkg/serializer/frame_reader_test.go index a696ed7d..063ed8a0 100644 --- a/pkg/serializer/frame_reader_test.go +++ b/pkg/serializer/frame_reader_test.go @@ -5,6 +5,7 @@ import ( "io/ioutil" "reflect" "strings" + "sync" "testing" "k8s.io/apimachinery/pkg/runtime/serializer/json" @@ -92,6 +93,7 @@ func Test_FrameReader_ReadFrame(t *testing.T) { t.Run(tt.name, func(t *testing.T) { rf := &frameReader{ rc: tt.fields.rc, + rcMu: &sync.Mutex{}, bufSize: 
tt.fields.bufSize, maxFrameSize: tt.fields.maxFrameSize, } From 21b857155b0c4a84c0b773136d05a9f9bf290d48 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Jan 2021 22:15:08 +0200 Subject: [PATCH 009/149] Move common functions to an utils file. Add a mutex to guard the scheme in the serializer. Add a JSONTransformer interface, and make ContentType implement it. --- pkg/serializer/serializer.go | 73 ++++++++++++++---------- pkg/serializer/utils.go | 106 +++++++++++++++++++++++++++++++++++ 2 files changed, 149 insertions(+), 30 deletions(-) create mode 100644 pkg/serializer/utils.go diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index 8bf691d2..be1c42e1 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -3,10 +3,12 @@ package serializer import ( "errors" "fmt" + "sync" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" k8sserializer "k8s.io/apimachinery/pkg/runtime/serializer" + "sigs.k8s.io/yaml" ) // ContentType specifies a content type for Encoders, Decoders, FrameWriters and FrameReaders @@ -22,8 +24,12 @@ const ( ContentTypeYAML = ContentType(runtime.ContentTypeYAML) ) -// ErrUnsupportedContentType is returned if the specified content type isn't supported -var ErrUnsupportedContentType = errors.New("unsupported content type") +var ( + // ErrUnsupportedContentType is returned if the specified content type isn't supported + ErrUnsupportedContentType = errors.New("unsupported content type") + // ErrObjectIsNotList is returned when a runtime.Object was not a List type + ErrObjectIsNotList = errors.New("given runtime.Object is not a *List type, or does not implement metav1.ListInterface") +) // ContentTyped is an interface for objects that are specific to a set ContentType. 
type ContentTyped interface { @@ -31,6 +37,34 @@ type ContentTyped interface { ContentType() ContentType } +// JSONTransformer is an interface for transforming bytes to JSON from +// a content-type specific implementation. +type JSONTransformer interface { + ContentTyped + // TransformToJSON takes bytes of the supported ContentType, and + // returns JSON bytes. + TransformToJSON([]byte) ([]byte, error) +} + +// ContentType implements JSONTransformer +var _ JSONTransformer = ContentType("") + +func (ct ContentType) ContentType() ContentType { return ct } + +// TransformToJSON takes bytes of the supported ContentType, and +// returns JSON bytes. +func (ct ContentType) TransformToJSON(in []byte) ([]byte, error) { + // If the given content type already is JSON, then we're all good + switch ct { + case ContentTypeJSON: + return in, nil + case ContentTypeYAML: + return yaml.YAMLToJSONStrict(in) + default: + return nil, fmt.Errorf("%w: cannot transform %s to JSON", ErrUnsupportedContentType, ct) + } +} + // Serializer is an interface providing high-level decoding/encoding functionality // for types registered in a *runtime.Scheme type Serializer interface { @@ -63,8 +97,10 @@ type Serializer interface { } type schemeAndCodec struct { - scheme *runtime.Scheme - codecs *k8sserializer.CodecFactory + // scheme is not thread-safe, hence it is guarded by a mutex + scheme *runtime.Scheme + schemeMu *sync.Mutex + codecs *k8sserializer.CodecFactory } // Encoder is a high-level interface for encoding Kubernetes API Machinery objects and writing them @@ -188,8 +224,9 @@ func NewSerializer(scheme *runtime.Scheme, codecs *k8sserializer.CodecFactory) S return &serializer{ schemeAndCodec: &schemeAndCodec{ - scheme: scheme, - codecs: codecs, + scheme: scheme, + schemeMu: &sync.Mutex{}, + codecs: codecs, }, converter: newConverter(scheme), defaulter: newDefaulter(scheme), @@ -242,27 +279,3 @@ func prioritizedVersionForGroup(scheme *runtime.Scheme, groupName string) (schem // Use the 
first, preferred, (external) version return gvs[0], nil } - -func GVKForObject(scheme *runtime.Scheme, obj runtime.Object) (schema.GroupVersionKind, error) { - // Safety check: one should not do this - if obj == nil || obj.GetObjectKind() == nil { - return schema.GroupVersionKind{}, fmt.Errorf("GVKForObject: obj or obj.GetObjectKind() must not be nil") - } - // If we already have TypeMeta filled in here, just use it - // TODO: This is probably not needed - gvk := obj.GetObjectKind().GroupVersionKind() - if !gvk.Empty() { - return gvk, nil - } - - // TODO: If there are two GVKs returned, it's probably a misconfiguration in the scheme - // It might be expected though, and we can tolerate setting the GVK manually IFF there are more than - // one ObjectKind AND the given GVK is one of them. - - // Get the possible kinds for the object - gvks, unversioned, err := scheme.ObjectKinds(obj) - if unversioned || err != nil || len(gvks) != 1 { - return schema.GroupVersionKind{}, fmt.Errorf("unversioned %t or err %v or invalid gvks %v", unversioned, err, gvks) - } - return gvks[0], nil -} diff --git a/pkg/serializer/utils.go b/pkg/serializer/utils.go new file mode 100644 index 00000000..d0b5f478 --- /dev/null +++ b/pkg/serializer/utils.go @@ -0,0 +1,106 @@ +package serializer + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +func GVKForObject(scheme *runtime.Scheme, obj runtime.Object) (schema.GroupVersionKind, error) { + // Safety check: one should not do this + if obj == nil || obj.GetObjectKind() == nil { + return schema.GroupVersionKind{}, fmt.Errorf("GVKForObject: obj or obj.GetObjectKind() must not be nil") + } + + // If this is a runtime.Unknown object, return the GVK stored in TypeMeta + if gvk := 
obj.GetObjectKind().GroupVersionKind(); IsUnknown(obj) && !gvk.Empty() { + return gvk, nil + } + + // Special case: Allow objects with two versions to be registered, when the caller is specific + // about what version they want populated. + // This is needed essentially for working around that there are specific K8s types (structs) + // that have been registered with multiple GVKs (e.g. a Deployment struct in both apps & extensions) + // TODO: Maybe there is a better way to solve this? Remove unwanted entries from the scheme typeToGVK + // map manually? + gvks, _, _ := scheme.ObjectKinds(obj) + if len(gvks) > 1 { + // If we have a configuration with more than one gvk for the same object, + // check the set GVK on the object to "choose" the right one, if exists in the list + setGVK := obj.GetObjectKind().GroupVersionKind() + if !setGVK.Empty() { + for _, gvk := range gvks { + if EqualsGVK(setGVK, gvk) { + return gvk, nil + } + } + } + } + + // TODO: Should we just copy-paste this one, or move it into k8s core to avoid importing controller-runtime + // only for this function? + return apiutil.GVKForObject(obj, scheme) +} + +// GVKForList returns the GroupVersionKind for the items in a given List type. +// In the case of Unstructured or PartialObjectMetadata, it is required that this +// information is already set in TypeMeta. The "List" suffix is never returned. +func GVKForList(obj client.ObjectList, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { + // First, get the GVK as normal. + gvk, err := GVKForObject(scheme, obj) + if err != nil { + return schema.GroupVersionKind{}, err + } + // Make sure this is a list type, i.e. it has the an "Items" field. + isList := meta.IsListType(obj) + if !isList { + return schema.GroupVersionKind{}, ErrObjectIsNotList + } + // Make sure the returned GVK never ends in List. + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + return gvk, nil +} + +// EqualsGK returns true if gk1 and gk2 have the same fields. 
+func EqualsGK(gk1, gk2 schema.GroupKind) bool { + return gk1.Group == gk2.Group && gk1.Kind == gk2.Kind +} + +// EqualsGVK returns true if gvk1 and gvk2 have the same fields. +func EqualsGVK(gvk1, gvk2 schema.GroupVersionKind) bool { + return EqualsGK(gvk1.GroupKind(), gvk2.GroupKind()) && gvk1.Version == gvk2.Version +} + +func IsUnknown(obj runtime.Object) bool { + _, isUnknown := obj.(*runtime.Unknown) + return isUnknown +} + +func IsPartialObject(obj runtime.Object) bool { + _, isPartial := obj.(*metav1.PartialObjectMetadata) + return isPartial +} + +func IsPartialObjectList(obj runtime.Object) bool { + _, isPartialList := obj.(*metav1.PartialObjectMetadataList) + return isPartialList +} + +func IsPartialObjectMetadataOrList(obj runtime.Object) bool { + return IsPartialObject(obj) || IsPartialObjectList(obj) +} + +func IsUnstructured(obj runtime.Object) bool { + _, isUnstructured := obj.(runtime.Unstructured) + return isUnstructured +} + +func IsNonConvertible(obj runtime.Object) bool { + return IsUnstructured(obj) || IsPartialObjectMetadataOrList(obj) +} From a9985cc34704ccc118ba654f4a7545a563eef6f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Jan 2021 22:20:38 +0200 Subject: [PATCH 010/149] Never convert unknown, unstructured or partial objects --- pkg/serializer/convertor.go | 3 ++- pkg/serializer/utils.go | 8 +++----- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/pkg/serializer/convertor.go b/pkg/serializer/convertor.go index bdea096c..3fbc814f 100644 --- a/pkg/serializer/convertor.go +++ b/pkg/serializer/convertor.go @@ -169,7 +169,8 @@ func (c *objectConvertor) ConvertToVersion(in runtime.Object, groupVersioner run // as before, using the scheme's ConvertToVersion function. But if we don't want to convert the newly-decoded // external object, we can just do nothing and the object will stay unconverted. // doConversion is always true in the Encode codepath. 
- if !c.doConversion { + // Also, never convert unknown, partial metadata or unstructured objects (defined as "non-convertible"). + if !c.doConversion || IsNonConvertible(in) { // DeepCopy the object to make sure that although in would be somehow modified, it doesn't affect out return in.DeepCopyObject(), nil } diff --git a/pkg/serializer/utils.go b/pkg/serializer/utils.go index d0b5f478..98886e0d 100644 --- a/pkg/serializer/utils.go +++ b/pkg/serializer/utils.go @@ -92,15 +92,13 @@ func IsPartialObjectList(obj runtime.Object) bool { return isPartialList } -func IsPartialObjectMetadataOrList(obj runtime.Object) bool { - return IsPartialObject(obj) || IsPartialObjectList(obj) -} - func IsUnstructured(obj runtime.Object) bool { _, isUnstructured := obj.(runtime.Unstructured) return isUnstructured } +// IsNonConvertible returns true for unstructured, partial and unknown objects +// that should not be converted. func IsNonConvertible(obj runtime.Object) bool { - return IsUnstructured(obj) || IsPartialObjectMetadataOrList(obj) + return IsUnstructured(obj) || IsPartialObject(obj) || IsPartialObjectList(obj) || IsUnknown(obj) } From 7106536c0cb8ccf9a2c181d079d438d789901f0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Jan 2021 22:22:21 +0200 Subject: [PATCH 011/149] Guard the AddKnownTypes call to the scheme by a mutex, to avoid race conditions. 
--- pkg/serializer/decode.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go index 4feff21f..ba85e8cd 100644 --- a/pkg/serializer/decode.go +++ b/pkg/serializer/decode.go @@ -149,8 +149,14 @@ func (d *decoder) Decode(fr FrameReader) (runtime.Object, error) { func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runtime.Object, error) { // If the scheme doesn't recognize a v1.List, and we enabled opts.DecodeListElements, // make the scheme able to decode the v1.List automatically - if *d.opts.DecodeListElements && !d.scheme.Recognizes(listGVK) { - d.scheme.AddKnownTypes(metav1.Unversioned, &metav1.List{}) + if *d.opts.DecodeListElements { + // As .AddKnownTypes is writing to the scheme, make sure we guard the check and the write with a + // mutex. + d.schemeMu.Lock() + if !d.scheme.Recognizes(listGVK) { + d.scheme.AddKnownTypes(metav1.Unversioned, &metav1.List{}) + } + d.schemeMu.Unlock() } // Record if this decode call should have runtime.DecodeInto-functionality From 58642885f7864d19fa538e88d56a2ba881b4f88b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Jan 2021 22:26:23 +0200 Subject: [PATCH 012/149] Make it possible for the codec used internally to recognize partial objects and lists by their embedded TypeMeta info. 
--- pkg/serializer/decode.go | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go index ba85e8cd..ba07676a 100644 --- a/pkg/serializer/decode.go +++ b/pkg/serializer/decode.go @@ -367,7 +367,9 @@ func newConversionCodecForScheme( defaulter = scheme } convertor := newObjectConvertor(scheme, performConversion) - return versioning.NewCodec(encoder, decoder, convertor, scheme, scheme, defaulter, encodeVersion, decodeVersion, scheme.Name()) + // a typer that recognizes metav1.PartialObjectMetadata{,List} + typer := &customTyper{scheme} + return versioning.NewCodec(encoder, decoder, convertor, scheme, typer, defaulter, encodeVersion, decodeVersion, scheme.Name()) } // TODO: Use https://github.com/kubernetes/apimachinery/blob/master/pkg/runtime/serializer/yaml/meta.go @@ -384,3 +386,34 @@ func extractYAMLTypeMeta(data []byte) (*schema.GroupVersionKind, error) { gvk := gv.WithKind(typeMeta.Kind) return &gvk, nil } + +var _ runtime.ObjectTyper = &customTyper{} + +type customTyper struct { + scheme *runtime.Scheme +} + +// ObjectKinds is an extension to the native Scheme.ObjectKinds function, that also +// recognizes partial matadata objects and lists. The logic here follows closely the +// scheme's own logic. +func (t *customTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { + // partial objects are always fine to encode/decode as-is when GVK is set. + // this similar code exists in runtime.Scheme.ObjectKinds for reference. 
+ if IsPartialObject(obj) || IsPartialObjectList(obj) { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, runtime.NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return nil, false, runtime.NewMissingVersionErr("unstructured object has no version") + } + return []schema.GroupVersionKind{gvk}, false, nil + } + return t.scheme.ObjectKinds(obj) +} + +// Recognizes just calls the underlying Scheme.Recognizes +func (t *customTyper) Recognizes(gvk schema.GroupVersionKind) bool { + return t.scheme.Recognizes(gvk) +} From 951a162cfbe7e2b613e7adf0037df1b5d7d6763c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Jan 2021 22:30:03 +0200 Subject: [PATCH 013/149] Use the upstream YAML MetaFactory. --- pkg/serializer/decode.go | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go index ba07676a..054d58ed 100644 --- a/pkg/serializer/decode.go +++ b/pkg/serializer/decode.go @@ -11,7 +11,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/runtime/serializer/versioning" - "sigs.k8s.io/yaml" + serializeryaml "k8s.io/apimachinery/pkg/runtime/serializer/yaml" ) // This is the groupversionkind for the v1.List object @@ -274,7 +274,7 @@ func (d *decoder) decodeUnknown(doc []byte, ct ContentType) (runtime.Object, err func (d *decoder) handleDecodeError(doc []byte, origErr error) error { // Parse the document's TypeMeta information - gvk, err := extractYAMLTypeMeta(doc) + gvk, err := serializeryaml.DefaultMetaFactory.Interpret(doc) if err != nil { return fmt.Errorf("failed to interpret TypeMeta from the given the YAML: %v. 
Decode error was: %w", err, origErr) } @@ -372,21 +372,6 @@ func newConversionCodecForScheme( return versioning.NewCodec(encoder, decoder, convertor, scheme, typer, defaulter, encodeVersion, decodeVersion, scheme.Name()) } -// TODO: Use https://github.com/kubernetes/apimachinery/blob/master/pkg/runtime/serializer/yaml/meta.go -// when we can assume everyone is vendoring k8s v1.19 -func extractYAMLTypeMeta(data []byte) (*schema.GroupVersionKind, error) { - typeMeta := runtime.TypeMeta{} - if err := yaml.Unmarshal(data, &typeMeta); err != nil { - return nil, fmt.Errorf("could not interpret GroupVersionKind: %w", err) - } - gv, err := schema.ParseGroupVersion(typeMeta.APIVersion) - if err != nil { - return nil, err - } - gvk := gv.WithKind(typeMeta.Kind) - return &gvk, nil -} - var _ runtime.ObjectTyper = &customTyper{} type customTyper struct { From 1dd8632c17c0963b8b37f36990be236e7c926546 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 11 Jan 2021 22:30:43 +0200 Subject: [PATCH 014/149] Fix the code getting the extension from the content-type, earlier it was racy. 
--- pkg/storage/format.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pkg/storage/format.go b/pkg/storage/format.go index 84993ceb..f5756592 100644 --- a/pkg/storage/format.go +++ b/pkg/storage/format.go @@ -10,11 +10,11 @@ var ContentTypes = map[string]serializer.ContentType{ ".yml": serializer.ContentTypeYAML, } +var extToContentType = map[serializer.ContentType]string{ + serializer.ContentTypeJSON: ".json", + serializer.ContentTypeYAML: ".yaml", +} + func extForContentType(wanted serializer.ContentType) string { - for ext, ct := range ContentTypes { - if ct == wanted { - return ext - } - } - return "" + return extToContentType[wanted] } From 73f90ba20dd5edc5412786759b8bc8b95e4cd56d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 12 Jan 2021 00:03:22 +0200 Subject: [PATCH 015/149] Use the "functional" options pattern from controller-runtime in the serializer. --- pkg/serializer/comments.go | 4 +- pkg/serializer/decode.go | 115 ++------------ pkg/serializer/encode.go | 74 +-------- pkg/serializer/options.go | 254 ++++++++++++++++++++++++++++++ pkg/serializer/serializer.go | 14 +- pkg/serializer/serializer_test.go | 22 +-- 6 files changed, 288 insertions(+), 195 deletions(-) create mode 100644 pkg/serializer/options.go diff --git a/pkg/serializer/comments.go b/pkg/serializer/comments.go index 302c4db0..a0169392 100644 --- a/pkg/serializer/comments.go +++ b/pkg/serializer/comments.go @@ -27,7 +27,7 @@ var ( func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct ContentType) { // If the user opted into preserving comments and the format is YAML, proceed // If they didn't, return directly - if !(*d.opts.PreserveComments && ct == ContentTypeYAML) { + if !(d.opts.PreserveComments == PreserveCommentsStrict && ct == ContentTypeYAML) { return } @@ -41,7 +41,7 @@ func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct Conte // tryToPreserveComments tries to locate the 
possibly-saved original file data in the object's annotation func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw FrameWriter, obj runtime.Object, metaObj metav1.Object) error { // If the user did not opt into preserving comments, just sanitize ObjectMeta temporarily and and return - if !*e.opts.PreserveComments { + if e.opts.PreserveComments == PreserveCommentsDisable { // Normal encoding without the annotation (so it doesn't leak by accident) return noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, fw, obj)) } diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go index 054d58ed..bcc9b280 100644 --- a/pkg/serializer/decode.go +++ b/pkg/serializer/decode.go @@ -5,7 +5,6 @@ import ( "io" "reflect" - "github.com/weaveworks/libgitops/pkg/util" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -17,107 +16,23 @@ import ( // This is the groupversionkind for the v1.List object var listGVK = metav1.Unversioned.WithKind("List") -type DecodingOptions struct { - // Not applicable for Decoder.DecodeInto(). If true, the decoded external object - // will be converted into its hub (or internal, where applicable) representation. Otherwise, the decoded - // object will be left in its external representation. (Default: false) - ConvertToHub *bool - - // Parse the YAML/JSON in strict mode, returning a specific error if the input - // contains duplicate or unknown fields or formatting errors. (Default: true) - Strict *bool - - // Automatically default the decoded object. (Default: false) - Default *bool - - // Only applicable for Decoder.DecodeAll(). If the underlying data contains a v1.List, - // the items of the list will be traversed, decoded into their respective types, and - // appended to the returned slice. The v1.List will in this case not be returned. - // This conversion does NOT support preserving comments. 
If the given scheme doesn't - // recognize the v1.List, before using it will be registered automatically. (Default: true) - DecodeListElements *bool - - // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta. - // Only applicable to ContentTypeYAML framers. - // Using any other framer will be silently ignored. Usage of this option also requires setting - // the PreserveComments in EncodingOptions, too. (Default: false) - PreserveComments *bool - - // DecodeUnknown specifies whether decode objects with an unknown GroupVersionKind into a - // *runtime.Unknown object when running Decode(All) (true value) or to return an error when - // any unrecognized type is found (false value). (Default: false) - DecodeUnknown *bool -} - -type DecodingOptionsFunc func(*DecodingOptions) - -func WithConvertToHubDecode(convert bool) DecodingOptionsFunc { - return func(opts *DecodingOptions) { - opts.ConvertToHub = &convert - } -} - -func WithStrictDecode(strict bool) DecodingOptionsFunc { - return func(opts *DecodingOptions) { - opts.Strict = &strict - } -} - -func WithDefaultsDecode(defaults bool) DecodingOptionsFunc { - return func(opts *DecodingOptions) { - opts.Default = &defaults - } -} - -func WithListElementsDecoding(listElements bool) DecodingOptionsFunc { - return func(opts *DecodingOptions) { - opts.DecodeListElements = &listElements - } -} - -func WithCommentsDecode(comments bool) DecodingOptionsFunc { - return func(opts *DecodingOptions) { - opts.PreserveComments = &comments - } -} - -func WithUnknownDecode(unknown bool) DecodingOptionsFunc { - return func(opts *DecodingOptions) { - opts.DecodeUnknown = &unknown - } -} - -func WithDecodingOptions(newOpts DecodingOptions) DecodingOptionsFunc { - return func(opts *DecodingOptions) { - // TODO: Null-check all of these before using them - *opts = newOpts - } -} +func newDecoder(schemeAndCodec *schemeAndCodec, opts DecodeOptions) Decoder { + // Allow both YAML and JSON inputs 
(JSON is a subset of YAML), and deserialize in strict mode + s := json.NewSerializerWithOptions(json.DefaultMetaFactory, schemeAndCodec.scheme, schemeAndCodec.scheme, json.SerializerOptions{ + Yaml: true, + Strict: *opts.Strict, + }) -func defaultDecodeOpts() *DecodingOptions { - return &DecodingOptions{ - ConvertToHub: util.BoolPtr(false), - Strict: util.BoolPtr(true), - Default: util.BoolPtr(false), - DecodeListElements: util.BoolPtr(true), - PreserveComments: util.BoolPtr(false), - DecodeUnknown: util.BoolPtr(false), - } -} + decodeCodec := decoderForVersion(schemeAndCodec.scheme, s, *opts.Default, *opts.ConvertToHub) -func newDecodeOpts(fns ...DecodingOptionsFunc) *DecodingOptions { - opts := defaultDecodeOpts() - for _, fn := range fns { - fn(opts) - } - return opts + return &decoder{schemeAndCodec, decodeCodec, opts} } type decoder struct { *schemeAndCodec decoder runtime.Decoder - opts DecodingOptions + opts DecodeOptions } // Decode returns the decoded object from the next document in the FrameReader stream. 
@@ -326,18 +241,6 @@ func (d *decoder) extractNestedObjects(obj runtime.Object, ct ContentType) ([]ru return objs, nil } -func newDecoder(schemeAndCodec *schemeAndCodec, opts DecodingOptions) Decoder { - // Allow both YAML and JSON inputs (JSON is a subset of YAML), and deserialize in strict mode - s := json.NewSerializerWithOptions(json.DefaultMetaFactory, schemeAndCodec.scheme, schemeAndCodec.scheme, json.SerializerOptions{ - Yaml: true, - Strict: *opts.Strict, - }) - - decodeCodec := decoderForVersion(schemeAndCodec.scheme, s, *opts.Default, *opts.ConvertToHub) - - return &decoder{schemeAndCodec, decodeCodec, opts} -} - // decoderForVersion is used instead of CodecFactory.DecoderForVersion, as we want to use our own converter func decoderForVersion(scheme *runtime.Scheme, decoder *json.Serializer, doDefaulting, doConversion bool) runtime.Decoder { return newConversionCodecForScheme( diff --git a/pkg/serializer/encode.go b/pkg/serializer/encode.go index 50badaab..a06bd8ca 100644 --- a/pkg/serializer/encode.go +++ b/pkg/serializer/encode.go @@ -5,83 +5,21 @@ import ( "encoding/json" "strings" - "github.com/weaveworks/libgitops/pkg/util" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -type EncodingOptions struct { - // Indent JSON encoding output with this many spaces. (Default: nil, means no indentation) - // Only applicable to ContentTypeJSON framers. - // TODO: Make this a property of the FrameWriter instead? - JSONIndent *int - // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta. - // Only applicable to ContentTypeYAML framers. - // Using any other framer will be silently ignored. Usage of this option also requires setting - // the PreserveComments in DecodingOptions, too. 
(Default: false) - // TODO: Make this a BestEffort & Strict mode - PreserveComments *bool - - // TODO: Maybe consider an option to always convert to the preferred version (not just internal) -} - -type EncodingOptionsFunc func(*EncodingOptions) - -func WithPrettyEncode(pretty bool) EncodingOptionsFunc { - if pretty { - return WithJSONIndent(2) - } - return func(opts *EncodingOptions) { - // disable the indenting - opts.JSONIndent = nil - } -} - -func WithJSONIndent(spaces int) EncodingOptionsFunc { - return func(opts *EncodingOptions) { - opts.JSONIndent = &spaces - } -} - -func WithCommentsEncode(comments bool) EncodingOptionsFunc { - return func(opts *EncodingOptions) { - opts.PreserveComments = &comments - } -} - -func WithEncodingOptions(newOpts EncodingOptions) EncodingOptionsFunc { - return func(opts *EncodingOptions) { - // TODO: Null-check all of these before using them - *opts = newOpts - } -} - -func defaultEncodeOpts() *EncodingOptions { - return &EncodingOptions{ - JSONIndent: util.IntPtr(2), // Default to "pretty encoding" - PreserveComments: util.BoolPtr(false), - } -} - -func newEncodeOpts(fns ...EncodingOptionsFunc) *EncodingOptions { - opts := defaultEncodeOpts() - for _, fn := range fns { - fn(opts) +func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodeOptions) Encoder { + return &encoder{ + schemeAndCodec, + opts, } - return opts } type encoder struct { *schemeAndCodec - opts EncodingOptions -} - -func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodingOptions) Encoder { - return &encoder{ - schemeAndCodec, - opts, - } + opts EncodeOptions } // Encode encodes the given objects and writes them to the specified FrameWriter. @@ -138,7 +76,7 @@ func (e *encoder) EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv s // Check if the user requested prettified JSON output. // If the ContentType is JSON this is ok, we will intent the encode output on the fly. 
- if e.opts.JSONIndent != nil && fw.ContentType() == ContentTypeJSON { + if *e.opts.JSONIndent > 0 && fw.ContentType() == ContentTypeJSON { fw = &jsonPrettyFrameWriter{indent: *e.opts.JSONIndent, fw: fw} } diff --git a/pkg/serializer/options.go b/pkg/serializer/options.go new file mode 100644 index 00000000..466372de --- /dev/null +++ b/pkg/serializer/options.go @@ -0,0 +1,254 @@ +package serializer + +import "github.com/weaveworks/libgitops/pkg/util" + +type EncodeOption interface { + ApplyToEncode(*EncodeOptions) +} + +func defaultEncodeOpts() *EncodeOptions { + return &EncodeOptions{ + // Default to "pretty encoding" + JSONIndent: util.IntPtr(2), + PreserveComments: PreserveCommentsDisable, + } +} + +type EncodeOptions struct { + // Indent JSON encoding output with this many spaces. + // Set this to 0, use PrettyEncode(false) or JSONIndent(0) to disable pretty output. + // Only applicable to ContentTypeJSON framers. + // + // Default: nil or 0, means no indentation + // TODO: Make this a property of the FrameWriter instead? + JSONIndent *int + + // Whether to preserve YAML comments internally. + // This only works for objects embedding metav1.ObjectMeta. + // + // Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored. + // + // Usage of this option also requires setting the PreserveComments in DecodeOptions, too. 
+ // + // Default: PreserveCommentsDisable + PreserveComments PreserveComments + + // TODO: Maybe consider an option to always convert to the preferred version (not just internal) +} + +var _ EncodeOption = &EncodeOptions{} + +func (o *EncodeOptions) ApplyToEncode(target *EncodeOptions) { + if o.JSONIndent != nil { + target.JSONIndent = o.JSONIndent + } + if o.PreserveComments != 0 { + target.PreserveComments = o.PreserveComments + } +} + +func (o *EncodeOptions) ApplyOptions(opts []EncodeOption) *EncodeOptions { + for _, opt := range opts { + opt.ApplyToEncode(o) + } + // it is guaranteed that all options are non-nil, as defaultEncodeOpts() includes all fields + return o +} + +// Whether to preserve YAML comments internally. +// This only works for objects embedding metav1.ObjectMeta. +// +// Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored. +// TODO: Add a BestEffort mode +type PreserveComments int + +const ( + // PreserveCommentsDisable means do not try to preserve comments + PreserveCommentsDisable PreserveComments = 1 + iota + // PreserveCommentsStrict means try to preserve comments, and fail if it does not work + PreserveCommentsStrict +) + +var _ EncodeOption = PreserveComments(0) +var _ DecodeOption = PreserveComments(0) + +func (p PreserveComments) ApplyToEncode(target *EncodeOptions) { + // TODO: Validate? + target.PreserveComments = p +} + +func (p PreserveComments) ApplyToDecode(target *DecodeOptions) { + // TODO: Validate? + target.PreserveComments = p +} + +// Indent JSON encoding output with this many spaces. +// Use PrettyEncode(false) or JSONIndent(0) to disable pretty output. +// Only applicable to ContentTypeJSON framers. 
+type JSONIndent int + +var _ EncodeOption = JSONIndent(0) + +func (i JSONIndent) ApplyToEncode(target *EncodeOptions) { + target.JSONIndent = util.IntPtr(int(i)) +} + +// Shorthand for JSONIndent(0) if false, or JSONIndent(2) if true +type PrettyEncode bool + +var _ EncodeOption = PrettyEncode(false) + +func (pretty PrettyEncode) ApplyToEncode(target *EncodeOptions) { + if pretty { + JSONIndent(2).ApplyToEncode(target) + } else { + JSONIndent(0).ApplyToEncode(target) + } +} + +// DECODING + +type DecodeOption interface { + ApplyToDecode(*DecodeOptions) +} + +func defaultDecodeOpts() *DecodeOptions { + return &DecodeOptions{ + ConvertToHub: util.BoolPtr(false), + Strict: util.BoolPtr(true), + Default: util.BoolPtr(false), + DecodeListElements: util.BoolPtr(true), + PreserveComments: PreserveCommentsDisable, + DecodeUnknown: util.BoolPtr(false), + } +} + +type DecodeOptions struct { + // Not applicable for Decoder.DecodeInto(). If true, the decoded external object + // will be converted into its hub (or internal, where applicable) representation. + // Otherwise, the decoded object will be left in its external representation. + // + // Default: false + ConvertToHub *bool + + // Parse the YAML/JSON in strict mode, returning a specific error if the input + // contains duplicate or unknown fields or formatting errors. + // + // Default: true + Strict *bool + + // Automatically default the decoded object. + // Default: false + Default *bool + + // Only applicable for Decoder.DecodeAll(). If the underlying data contains a v1.List, + // the items of the list will be traversed, decoded into their respective types, and + // appended to the returned slice. The v1.List will in this case not be returned. + // This conversion does NOT support preserving comments. If the given scheme doesn't + // recognize the v1.List, before using it will be registered automatically. + // + // Default: true + DecodeListElements *bool + + // Whether to preserve YAML comments internally. 
+ // This only works for objects embedding metav1.ObjectMeta. + // + // Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored. + // + // Usage of this option also requires setting the PreserveComments in EncodeOptions, too. + // + // Default: PreserveCommentsDisable + PreserveComments PreserveComments + + // DecodeUnknown specifies whether decode objects with an unknown GroupVersionKind into a + // *runtime.Unknown object when running Decode(All) (true value) or to return an error when + // any unrecognized type is found (false value). + // + // Default: false + DecodeUnknown *bool +} + +var _ DecodeOption = &DecodeOptions{} + +func (o *DecodeOptions) ApplyToDecode(target *DecodeOptions) { + if o.ConvertToHub != nil { + target.ConvertToHub = o.ConvertToHub + } + if o.Strict != nil { + target.Strict = o.Strict + } + if o.Default != nil { + target.Default = o.Default + } + if o.DecodeListElements != nil { + target.DecodeListElements = o.DecodeListElements + } + if o.PreserveComments != 0 { + target.PreserveComments = o.PreserveComments + } + if o.DecodeUnknown != nil { + target.DecodeUnknown = o.DecodeUnknown + } +} + +func (o *DecodeOptions) ApplyOptions(opts []DecodeOption) *DecodeOptions { + for _, opt := range opts { + opt.ApplyToDecode(o) + } + // it is guaranteed that all options are non-nil, as defaultDecodeOpts() includes all fields + return o +} + +// Not applicable for Decoder.DecodeInto(). If true, the decoded external object +// will be converted into its hub (or internal, where applicable) representation. +// Otherwise, the decoded object will be left in its external representation. +type ConvertToHub bool + +var _ DecodeOption = ConvertToHub(false) + +func (b ConvertToHub) ApplyToDecode(target *DecodeOptions) { + target.ConvertToHub = util.BoolPtr(bool(b)) +} + +// Parse the YAML/JSON in strict mode, returning a specific error if the input +// contains duplicate or unknown fields or formatting errors. 
+type DecodeStrict bool + +var _ DecodeOption = DecodeStrict(false) + +func (b DecodeStrict) ApplyToDecode(target *DecodeOptions) { + target.Strict = util.BoolPtr(bool(b)) +} + +// Automatically default the decoded object. +type DefaultAtDecode bool + +var _ DecodeOption = DefaultAtDecode(false) + +func (b DefaultAtDecode) ApplyToDecode(target *DecodeOptions) { + target.Default = util.BoolPtr(bool(b)) +} + +// Only applicable for Decoder.DecodeAll(). If the underlying data contains a v1.List, +// the items of the list will be traversed, decoded into their respective types, and +// appended to the returned slice. The v1.List will in this case not be returned. +// This conversion does NOT support preserving comments. If the given scheme doesn't +// recognize the v1.List, before using it will be registered automatically. +type DecodeListElements bool + +var _ DecodeOption = DecodeListElements(false) + +func (b DecodeListElements) ApplyToDecode(target *DecodeOptions) { + target.DecodeListElements = util.BoolPtr(bool(b)) +} + +// DecodeUnknown specifies whether decode objects with an unknown GroupVersionKind into a +// *runtime.Unknown object when running Decode(All) (true value) or to return an error when +// any unrecognized type is found (false value). +type DecodeUnknown bool + +var _ DecodeOption = DecodeUnknown(false) + +func (b DecodeUnknown) ApplyToDecode(target *DecodeOptions) { + target.DecodeUnknown = util.BoolPtr(bool(b)) +} diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index be1c42e1..48302f92 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -72,13 +72,13 @@ type Serializer interface { // a FrameWriter. The decoder can be customized by passing some options (e.g. WithDecodingOptions) // to this call. 
// The decoder supports both "classic" API Machinery objects and controller-runtime CRDs - Decoder(optsFn ...DecodingOptionsFunc) Decoder + Decoder(optsFn ...DecodeOption) Decoder // Encoder is a high-level interface for encoding Kubernetes API Machinery objects and writing them // to a FrameWriter. The encoder can be customized by passing some options (e.g. WithEncodingOptions) // to this call. // The encoder supports both "classic" API Machinery objects and controller-runtime CRDs - Encoder(optsFn ...EncodingOptionsFunc) Encoder + Encoder(optsFn ...EncodeOption) Encoder // Converter is a high-level interface for converting objects between different versions // The converter supports both "classic" API Machinery objects and controller-runtime CRDs @@ -252,14 +252,12 @@ func (s *serializer) Codecs() *k8sserializer.CodecFactory { return s.codecs } -func (s *serializer) Decoder(optFns ...DecodingOptionsFunc) Decoder { - opts := newDecodeOpts(optFns...) - return newDecoder(s.schemeAndCodec, *opts) +func (s *serializer) Decoder(opts ...DecodeOption) Decoder { + return newDecoder(s.schemeAndCodec, *defaultDecodeOpts().ApplyOptions(opts)) } -func (s *serializer) Encoder(optFns ...EncodingOptionsFunc) Encoder { - opts := newEncodeOpts(optFns...) 
- return newEncoder(s.schemeAndCodec, *opts) +func (s *serializer) Encoder(opts ...EncodeOption) Encoder { + return newEncoder(s.schemeAndCodec, *defaultEncodeOpts().ApplyOptions(opts)) } func (s *serializer) Converter() Converter { diff --git a/pkg/serializer/serializer_test.go b/pkg/serializer/serializer_test.go index ba239855..c475ec7c 100644 --- a/pkg/serializer/serializer_test.go +++ b/pkg/serializer/serializer_test.go @@ -21,8 +21,8 @@ var ( codecs = k8sserializer.NewCodecFactory(scheme) ourserializer = NewSerializer(scheme, &codecs) defaultEncoder = ourserializer.Encoder( - WithPrettyEncode(false), // TODO: Also test the pretty serializer - WithCommentsEncode(true), + PrettyEncode(false), // TODO: Also test the pretty serializer + PreserveCommentsStrict, ) groupname = "foogroup" @@ -402,8 +402,8 @@ func TestDecode(t *testing.T) { for _, rt := range tests { t.Run(rt.name, func(t2 *testing.T) { obj, actual := ourserializer.Decoder( - WithDefaultsDecode(rt.doDefaulting), - WithConvertToHubDecode(rt.doConversion), + DefaultAtDecode(rt.doDefaulting), + ConvertToHub(rt.doConversion), ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) @@ -444,7 +444,7 @@ func TestDecodeInto(t *testing.T) { t.Run(rt.name, func(t2 *testing.T) { actual := ourserializer.Decoder( - WithDefaultsDecode(rt.doDefaulting), + DefaultAtDecode(rt.doDefaulting), ).DecodeInto(NewYAMLFrameReader(FromBytes(rt.data)), rt.obj) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) @@ -484,8 +484,8 @@ func TestDecodeAll(t *testing.T) { for _, rt := range tests { t.Run(rt.name, func(t2 *testing.T) { objs, actual := ourserializer.Decoder( - WithDefaultsDecode(rt.doDefaulting), - WithListElementsDecoding(rt.listSplit), + DefaultAtDecode(rt.doDefaulting), + DecodeListElements(rt.listSplit), 
).DecodeAll(NewYAMLFrameReader(FromBytes(rt.data))) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) @@ -527,7 +527,7 @@ func TestDecodeUnknown(t *testing.T) { for _, rt := range tests { t.Run(rt.name, func(t2 *testing.T) { obj, actual := ourserializer.Decoder( - WithUnknownDecode(rt.unknown), + DecodeUnknown(rt.unknown), ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) @@ -560,9 +560,9 @@ func TestRoundtrip(t *testing.T) { for _, rt := range tests { t.Run(rt.name, func(t2 *testing.T) { obj, err := ourserializer.Decoder( - WithConvertToHubDecode(true), - WithCommentsDecode(true), - WithUnknownDecode(true), + ConvertToHub(true), + PreserveCommentsStrict, + DecodeUnknown(true), ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) if err != nil { t2.Errorf("unexpected decode error: %v", err) From 143839c535db7e2e17ba88647770030487ffedfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 12 Jan 2021 00:12:23 +0200 Subject: [PATCH 016/149] Create pluggable namespace-related bridges between our Storage system and the rest of Kubernetes. --- pkg/storage/namespaces.go | 168 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 168 insertions(+) create mode 100644 pkg/storage/namespaces.go diff --git a/pkg/storage/namespaces.go b/pkg/storage/namespaces.go new file mode 100644 index 00000000..75b9a1b4 --- /dev/null +++ b/pkg/storage/namespaces.go @@ -0,0 +1,168 @@ +package storage + +import ( + "errors" + "fmt" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +var ( + // ErrNoSuchNamespace means that the set of namespaces was searched in the + // system, but the requested namespace wasn't in that list. 
+ ErrNoSuchNamespace = errors.New("no such namespace in the system") +) + +// NamespaceEnforcer enforces a namespace policy for the Storage. +type NamespaceEnforcer interface { + // RequireNamespaceExists specifies whether the namespace must exist in the system. + // For example, Kubernetes requires this by default. + RequireNamespaceExists() bool + // EnforceNamespace operates on the object to make it conform with a given set of rules. + // If RequireNamespaceExists() is true, all the namespaces available in the system must + // be passed to namespaces. + // For example, Kubernetes enforces the following rules: + // Namespaced resources: + // If .metadata.namespace == "": .metadata.namespace = "default" + // If .metadata.namespace != "": Make sure there is such a namespace, and use it in that case + // Non-namespaced resources: + // If .metadata.namespace != "": .metadata.namespace = "" + EnforceNamespace(obj Object, namespaced bool, namespaces sets.String) error +} + +// K8sNamespaceEnforcer implements NamespaceEnforcer similarly to how the API server behaves. +type K8sNamespaceEnforcer struct{} + +var _ NamespaceEnforcer = K8sNamespaceEnforcer{} + +func (K8sNamespaceEnforcer) RequireNamespaceExists() bool { return true } + +func (K8sNamespaceEnforcer) EnforceNamespace(obj Object, namespaced bool, namespaces sets.String) error { + ns := obj.GetNamespace() + if !namespaced { + // If a namespace was set, it should be sanitized. + if len(ns) != 0 { + obj.SetNamespace("") + } + return nil + } + // The resource is namespaced. + // If it is empty, set it to the default namespace. + if len(ns) == 0 { + obj.SetNamespace(metav1.NamespaceDefault) + return nil + } + // If the namespace field is set, but it doesn't exist in the set, error + if !namespaces.Has(ns) { + return fmt.Errorf("%w: %q", ErrNoSuchNamespace, ns) + } + return nil +} + +// Namespacer is an interface that lets the caller know if a GroupKind is namespaced +// or not. 
There are two ready-made implementations: +// 1. RESTMapperToNamespacer +// 2. NewStaticNamespacer +type Namespacer interface { + // IsNamespaced returns true if the GroupKind is a namespaced type + IsNamespaced(gk schema.GroupKind) (bool, error) +} + +// RESTMapper is a subset of the meta.RESTMapper interface +type RESTMapper interface { + // RESTMapping identifies a preferred resource mapping for the provided group kind. + RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) +} + +// RESTMapperToNamespacer implements the Namespacer interface by fetching (and caching) data +// from the given RESTMapper interface, that is compatible with any meta.RESTMapper implementation. +// This allows you to e.g. pass in a meta.RESTMapper yielded from +// sigs.k8s.io/controller-runtime/pkg/client/apiutil.NewDiscoveryRESTMapper(c *rest.Config), or +// k8s.io/client-go/restmapper.NewDiscoveryRESTMapper(groups []*restmapper.APIGroupResources) +// in order to look up namespacing information from either a running API server, or statically, from +// the list of restmapper.APIGroupResources. 
+func RESTMapperToNamespacer(mapper RESTMapper) Namespacer { + return &restNamespacer{ + mapper: mapper, + mappingByType: make(map[schema.GroupKind]*meta.RESTMapping), + mu: &sync.RWMutex{}, + } +} + +var _ Namespacer = &restNamespacer{} + +type restNamespacer struct { + mapper RESTMapper + + mappingByType map[schema.GroupKind]*meta.RESTMapping + mu *sync.RWMutex +} + +func (n *restNamespacer) IsNamespaced(gk schema.GroupKind) (bool, error) { + m, err := n.getMapping(gk) + if err != nil { + return false, err + } + return mappingNamespaced(m), nil +} + +func (n *restNamespacer) getMapping(gk schema.GroupKind) (*meta.RESTMapping, error) { + n.mu.RLock() + mapping, ok := n.mappingByType[gk] + n.mu.RUnlock() + // If already cached, we're ok + if ok { + return mapping, nil + } + + // Write the mapping info to our cache + n.mu.Lock() + defer n.mu.Unlock() + m, err := n.mapper.RESTMapping(gk) + if err != nil { + return nil, err + } + n.mappingByType[gk] = m + return m, nil +} + +func mappingNamespaced(mapping *meta.RESTMapping) bool { + return mapping.Scope.Name() == meta.RESTScopeNameNamespace +} + +// NewStaticNamespacer has a default policy, which is that objects are in general namespaced +// (defaultToNamespaced == true), or that they are in general root-scoped (defaultToNamespaced == false). +// To the default policy, exceptions can be added, so that for that GroupKind, the default +// policy is reversed. 
+func NewStaticNamespacer(defaultToNamespaced bool, exceptions ...schema.GroupKind) Namespacer { + return &staticNamespacedInfo{defaultToNamespaced, exceptions} +} + +var _ Namespacer = &staticNamespacedInfo{} + +type staticNamespacedInfo struct { + defaultToNamespaced bool + exceptions []schema.GroupKind +} + +func (n *staticNamespacedInfo) IsNamespaced(gk schema.GroupKind) (bool, error) { + if n.defaultToNamespaced { + // namespace by default, the gks list is a list of root-scoped entities + return !n.gkIsException(gk), nil + } + // root by default, the gks in the list are namespaced + return n.gkIsException(gk), nil +} + +func (n *staticNamespacedInfo) gkIsException(target schema.GroupKind) bool { + for _, gk := range n.exceptions { + if gk == target { + return true + } + } + return false +} From df6dc0d90eda3753ac6e0dd910a8937e227299c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 12 Jan 2021 00:12:44 +0200 Subject: [PATCH 017/149] Add IntPtr to utils. --- pkg/util/util.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/util/util.go b/pkg/util/util.go index c80159c7..ab844bfb 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -43,6 +43,10 @@ func BoolPtr(b bool) *bool { return &b } +func IntPtr(i int) *int { + return &i +} + // RandomSHA returns a hex-encoded string from {byteLen} random bytes. func RandomSHA(byteLen int) (string, error) { b := make([]byte, byteLen) From 590b98d53b047703bab4d3f108c5e202d7a43332 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 14 Jan 2021 11:18:48 +0200 Subject: [PATCH 018/149] Add a new Patcher interface to the serializer, that can patch both typed and unstructured objects. 
--- pkg/serializer/patch.go | 122 +++++++++++++++++++++++++++++++++++ pkg/serializer/serializer.go | 23 +++++-- pkg/serializer/utils.go | 17 +++++ 3 files changed, 155 insertions(+), 7 deletions(-) create mode 100644 pkg/serializer/patch.go diff --git a/pkg/serializer/patch.go b/pkg/serializer/patch.go new file mode 100644 index 00000000..e44e589b --- /dev/null +++ b/pkg/serializer/patch.go @@ -0,0 +1,122 @@ +package serializer + +import ( + "bytes" + "encoding/json" + "errors" + + "github.com/weaveworks/libgitops/pkg/util/patch" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/strategicpatch" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +type Patcher interface { + // ApplyOnStruct applies the given patch (JSON-encoded) using the given BytePatcher + // (that knows how to operate on that kind of patch type) into obj. + // + // obj MUST be a typed object. Unversioned, partial or unstructured objects are not + // supported. For those use-cases, convert your object into an unstructured one, and + // pass it to ApplyOnUnstructured. + // + // obj MUST NOT be an internal type. If you operate on an internal object as your "hub", + // convert the object yourself first to the GroupVersion of the patch bytes, and then + // convert back after this call. + // + // In case the patch would require knowledge about the schema (e.g. StrategicMergePatch), + // this function looks that metadata up using reflection of obj. + ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Object) error + + // ApplyOnUnstructured applies the given patch (JSON-encoded) using the given BytePatcher + // (that knows how to operate on that kind of patch type) into the unstructured obj. + // + // If knowledge about the schema is required by the patch type (e.g. StrategicMergePatch), + // it is the liability of the caller to provide an OpenAPI schema. 
+ ApplyOnUnstructured(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Unstructured, schema openapi.Schema) error +} + +type patcher struct { + *schemeAndCodec +} + +// ApplyOnStruct applies the given patch (JSON-encoded) using the given BytePatcher +// (that knows how to operate on that kind of patch type) into obj. +// +// obj MUST be a typed object. Unversioned, partial or unstructured objects are not +// supported. For those use-cases, convert your object into an unstructured one, and +// pass it to ApplyOnUnstructured. +// +// obj MUST NOT be an internal type. If you operate on an internal object as your "hub", +// convert the object yourself first to the GroupVersion of the patch bytes, and then +// convert back after this call. +// +// In case the patch would require knowledge about the schema (e.g. StrategicMergePatch), +// this function looks that metadata up using reflection of obj. +func (p *patcher) ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Object) error { + // Require that obj is typed + if !IsTyped(obj, p.scheme) { + return errors.New("obj must be typed") + } + // Get the GVK so we can check if obj is internal + gvk, err := GVKForObject(p.scheme, obj) + if err != nil { + return err + } + // It must not be internal, as we will encode it soon. 
+ if gvk.Version == runtime.APIVersionInternal { + return errors.New("obj must not be internal") + } + + // Create a non-pretty encoder + encopt := *defaultEncodeOpts().ApplyOptions([]EncodeOption{PrettyEncode(false)}) + enc := newEncoder(p.schemeAndCodec, encopt) + // Encode without conversion to the buffer + var buf bytes.Buffer + if err := enc.EncodeForGroupVersion(NewJSONFrameWriter(&buf), obj, gvk.GroupVersion()); err != nil { + return err + } + + // Get the schema in case needed by the BytePatcher + schema, err := strategicpatch.NewPatchMetaFromStruct(obj) + if err != nil { + return err + } + + // Apply the patch, and get the new JSON out + newJSON, err := bytePatcher.Apply(buf.Bytes(), patch, schema) + if err != nil { + return err + } + + // Decode into the object to apply the changes + fr := NewSingleFrameReader(newJSON, ContentTypeJSON) + dec := newDecoder(p.schemeAndCodec, *defaultDecodeOpts()) + if err := dec.DecodeInto(fr, obj); err != nil { + return err + } + + return nil +} + +func (p *patcher) ApplyOnUnstructured(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Unstructured, schema openapi.Schema) error { + // Marshal the object to form the source JSON + sourceJSON, err := json.Marshal(obj) + if err != nil { + return err + } + + // Conditionally get the schema from the provided OpenAPI spec + var patchMeta strategicpatch.LookupPatchMeta + if schema != nil { + patchMeta = strategicpatch.NewPatchMetaFromOpenAPI(schema) + } + + // Apply the patch, and get the new JSON out + newJSON, err := bytePatcher.Apply(sourceJSON, patch, patchMeta) + if err != nil { + return err + } + + // Decode back into obj + return json.Unmarshal(newJSON, obj) +} diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index 48302f92..97bc5524 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -87,6 +87,8 @@ type Serializer interface { // Defaulter is a high-level interface for accessing defaulting functions in a scheme 
Defaulter() Defaulter + Patcher() Patcher + // Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to // the "type universe" and advanced conversion/defaulting features Scheme() *runtime.Scheme @@ -222,14 +224,16 @@ func NewSerializer(scheme *runtime.Scheme, codecs *k8sserializer.CodecFactory) S *codecs = k8sserializer.NewCodecFactory(scheme) } + schemeCodec := &schemeAndCodec{ + scheme: scheme, + schemeMu: &sync.Mutex{}, + codecs: codecs, + } return &serializer{ - schemeAndCodec: &schemeAndCodec{ - scheme: scheme, - schemeMu: &sync.Mutex{}, - codecs: codecs, - }, - converter: newConverter(scheme), - defaulter: newDefaulter(scheme), + schemeAndCodec: schemeCodec, + converter: newConverter(scheme), + defaulter: newDefaulter(scheme), + patcher: &patcher{schemeCodec}, } } @@ -238,6 +242,7 @@ type serializer struct { *schemeAndCodec converter *converter defaulter *defaulter + patcher *patcher } // Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to @@ -268,6 +273,10 @@ func (s *serializer) Defaulter() Defaulter { return s.defaulter } +func (s *serializer) Patcher() Patcher { + return s.patcher +} + func prioritizedVersionForGroup(scheme *runtime.Scheme, groupName string) (schema.GroupVersion, error) { // Get the prioritized versions for the given group gvs := scheme.PrioritizedVersionsForGroup(groupName) diff --git a/pkg/serializer/utils.go b/pkg/serializer/utils.go index 98886e0d..f916a7a5 100644 --- a/pkg/serializer/utils.go +++ b/pkg/serializer/utils.go @@ -6,6 +6,7 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" @@ -92,13 +93,29 @@ func IsPartialObjectList(obj runtime.Object) bool { return isPartialList } +// IsUnstructured checks if obj is runtime.Unstructured func 
IsUnstructured(obj runtime.Object) bool { _, isUnstructured := obj.(runtime.Unstructured) return isUnstructured } +// IsUnstructuredList checks if obj is *unstructured.UnstructuredList +func IsUnstructuredList(obj runtime.Object) bool { + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + return isUnstructuredList +} + // IsNonConvertible returns true for unstructured, partial and unknown objects // that should not be converted. func IsNonConvertible(obj runtime.Object) bool { + // TODO: Should Lists also be marked non-convertible? + // IsUnstructured also covers IsUnstructuredList -- *UnstructuredList implements runtime.Unstructured return IsUnstructured(obj) || IsPartialObject(obj) || IsPartialObjectList(obj) || IsUnknown(obj) } + +// IsTyped returns true if the object is typed, i.e. registered with the given +// scheme and not unversioned. +func IsTyped(obj runtime.Object, scheme *runtime.Scheme) bool { + _, isUnversioned, err := scheme.ObjectKinds(obj) + return !isUnversioned && err == nil +} From da0e9776a66e94bae8a8ec5ed9d40bd740e22240 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 14 Jan 2021 11:19:33 +0200 Subject: [PATCH 019/149] Fix default value in godoc --- pkg/serializer/options.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/serializer/options.go b/pkg/serializer/options.go index 466372de..fc1a4547 100644 --- a/pkg/serializer/options.go +++ b/pkg/serializer/options.go @@ -19,7 +19,7 @@ type EncodeOptions struct { // Set this to 0, use PrettyEncode(false) or JSONIndent(0) to disable pretty output. // Only applicable to ContentTypeJSON framers. // - // Default: nil or 0, means no indentation + // Default: 2, i.e. pretty output // TODO: Make this a property of the FrameWriter instead? 
JSONIndent *int From 02e0e447aef8be5f10ab63560203a2e8d58e647f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 14 Jan 2021 11:20:11 +0200 Subject: [PATCH 020/149] update import --- pkg/gitdir/transport.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/gitdir/transport.go b/pkg/gitdir/transport.go index df2c325e..408c0b2d 100644 --- a/pkg/gitdir/transport.go +++ b/pkg/gitdir/transport.go @@ -4,7 +4,7 @@ import ( "errors" "github.com/fluxcd/go-git-providers/gitprovider" - "github.com/fluxcd/toolkit/pkg/ssh/knownhosts" + "github.com/fluxcd/pkg/ssh/knownhosts" "github.com/go-git/go-git/v5/plumbing/transport" "github.com/go-git/go-git/v5/plumbing/transport/http" "github.com/go-git/go-git/v5/plumbing/transport/ssh" From 23b74bfd14609faa97aaecb5c7a10c07bd1fc935 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 14 Jan 2021 11:24:57 +0200 Subject: [PATCH 021/149] Add context & a Namespacer to RawStorage, and support a bit more variety of paths --- pkg/storage/rawstorage.go | 241 ++++++++++++++++++++---------- pkg/storage/rawstorage_options.go | 42 ++++++ 2 files changed, 201 insertions(+), 82 deletions(-) create mode 100644 pkg/storage/rawstorage_options.go diff --git a/pkg/storage/rawstorage.go b/pkg/storage/rawstorage.go index 0533595d..aa8ab642 100644 --- a/pkg/storage/rawstorage.go +++ b/pkg/storage/rawstorage.go @@ -1,124 +1,150 @@ package storage import ( + "context" + "errors" "fmt" "io/ioutil" "os" - "path" "path/filepath" "strconv" "strings" - "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/util" - "k8s.io/apimachinery/pkg/runtime/schema" ) // RawStorage is a Key-indexed low-level interface to // store byte-encoded Objects (resources) in non-volatile // memory. +// TODO: Add thread-safety so it is not possible to issue a Write() or Delete() +// at the same time as any other read operation. 
type RawStorage interface { // Read returns a resource's content based on key. // If the resource does not exist, it returns ErrNotFound. - Read(key ObjectKey) ([]byte, error) + Read(ctx context.Context, key ObjectKey) ([]byte, error) // Exists checks if the resource indicated by key exists. - Exists(key ObjectKey) bool + Exists(ctx context.Context, key ObjectKey) bool // Write writes the given content to the resource indicated by key. // Error returns are implementation-specific. - Write(key ObjectKey, content []byte) error + Write(ctx context.Context, key ObjectKey, content []byte) error // Delete deletes the resource indicated by key. // If the resource does not exist, it returns ErrNotFound. - Delete(key ObjectKey) error + Delete(ctx context.Context, key ObjectKey) error // List returns all matching object keys based on the given KindKey. - List(key KindKey) ([]ObjectKey, error) + List(ctx context.Context, key KindKey) ([]ObjectKey, error) // Checksum returns a string checksum for the resource indicated by key. // If the resource does not exist, it returns ErrNotFound. - Checksum(key ObjectKey) (string, error) + Checksum(ctx context.Context, key ObjectKey) (string, error) // ContentType returns the content type of the contents of the resource indicated by key. - ContentType(key ObjectKey) serializer.ContentType + ContentType(ctx context.Context, key ObjectKey) serializer.ContentType + + // TODO: A Stat() command instead of Exists/Checksum/ContentType? // WatchDir returns the path for Watchers to watch changes in. WatchDir() string // GetKey retrieves the Key containing the virtual path based // on the given physical file path returned by a Watcher. 
+ // TODO: Make this a separate interface GetKey(path string) (ObjectKey, error) + + // Namespacer gives access to the namespacer that is used + Namespacer() Namespacer } -func NewGenericRawStorage(dir string, gv schema.GroupVersion, ct serializer.ContentType) RawStorage { +func NewGenericRawStorage(dir string, ct serializer.ContentType, namespacer Namespacer, opts ...GenericRawStorageOption) RawStorage { + if len(dir) == 0 { + panic("NewGenericRawStorage: dir is mandatory") + } ext := extForContentType(ct) if ext == "" { - panic("Invalid content type") + panic("NewGenericRawStorage: Invalid content type") + } + if namespacer == nil { + panic("NewGenericRawStorage: namespacer is mandatory") } + o := (&GenericRawStorageOptions{}).ApplyOptions(opts) return &GenericRawStorage{ - dir: dir, - gv: gv, - ct: ct, - ext: ext, + dir: dir, + ct: ct, + namespacer: namespacer, + opts: *o, + ext: ext, } } // GenericRawStorage is a rawstorage which stores objects as JSON files on disk, -// in the form: ///metadata.json. +// in either of the forms: +// ////. +// ///. // The GenericRawStorage only supports one GroupVersion at a time, and will error if given // any other resources type GenericRawStorage struct { - dir string - gv schema.GroupVersion - ct serializer.ContentType - ext string + dir string + ct serializer.ContentType + ext string + namespacer Namespacer + opts GenericRawStorageOptions } func (r *GenericRawStorage) keyPath(key ObjectKey) string { - return path.Join(r.dir, key.GetKind(), key.GetIdentifier(), fmt.Sprintf("metadata%s", r.ext)) + // // + paths := []string{r.kindKeyPath(key.Kind())} + if r.isNamespaced(key.Kind()) { + // .// + paths = append(paths, key.NamespacedName().Namespace) + } + if r.opts.SubDirectoryFileName == nil { + // ./. + paths = append(paths, key.NamespacedName().Name+r.ext) + } else { + // .//. + paths = append(paths, key.NamespacedName().Name, *r.opts.SubDirectoryFileName+r.ext) + } + + return filepath.Join(paths...) 
} -func (r *GenericRawStorage) kindKeyPath(kindKey KindKey) string { - return path.Join(r.dir, kindKey.GetKind()) +func (r *GenericRawStorage) Namespacer() Namespacer { + return r.namespacer } -func (r *GenericRawStorage) validateGroupVersion(kind KindKey) error { - if r.gv.Group == kind.GetGroup() && r.gv.Version == kind.GetVersion() { - return nil +func (r *GenericRawStorage) isNamespaced(gvk KindKey) bool { + namespaced, err := r.namespacer.IsNamespaced(gvk.GroupKind()) + if err != nil { + panic(err) // TODO: handle this better } - - return fmt.Errorf("GroupVersion %s/%s not supported by this GenericRawStorage", kind.GetGroup(), kind.GetVersion()) + return namespaced } -func (r *GenericRawStorage) Read(key ObjectKey) ([]byte, error) { - // Validate GroupVersion first - if err := r.validateGroupVersion(key); err != nil { - return nil, err +func (r *GenericRawStorage) kindKeyPath(gvk KindKey) string { + if r.opts.DisableGroupDirectory != nil && *r.opts.DisableGroupDirectory { + // /// + return filepath.Join(r.dir, gvk.Kind) } + // //// + return filepath.Join(r.dir, gvk.Group, gvk.Kind) +} +func (r *GenericRawStorage) Read(ctx context.Context, key ObjectKey) ([]byte, error) { // Check if the resource indicated by key exists - if !r.Exists(key) { + if !r.Exists(ctx, key) { return nil, ErrNotFound } return ioutil.ReadFile(r.keyPath(key)) } -func (r *GenericRawStorage) Exists(key ObjectKey) bool { - // Validate GroupVersion first - if err := r.validateGroupVersion(key); err != nil { - return false - } - +func (r *GenericRawStorage) Exists(_ context.Context, key ObjectKey) bool { return util.FileExists(r.keyPath(key)) } -func (r *GenericRawStorage) Write(key ObjectKey, content []byte) error { - // Validate GroupVersion first - if err := r.validateGroupVersion(key); err != nil { - return err - } - +func (r *GenericRawStorage) Write(ctx context.Context, key ObjectKey, content []byte) error { file := r.keyPath(key) // Create the underlying directories if they do not exist 
already - if !r.Exists(key) { - if err := os.MkdirAll(path.Dir(file), 0755); err != nil { + if !r.Exists(ctx, key) { + if err := os.MkdirAll(filepath.Dir(file), 0755); err != nil { return err } } @@ -126,65 +152,94 @@ func (r *GenericRawStorage) Write(key ObjectKey, content []byte) error { return ioutil.WriteFile(file, content, 0644) } -func (r *GenericRawStorage) Delete(key ObjectKey) error { - // Validate GroupVersion first - if err := r.validateGroupVersion(key); err != nil { - return err - } - +func (r *GenericRawStorage) Delete(ctx context.Context, key ObjectKey) error { // Check if the resource indicated by key exists - if !r.Exists(key) { + if !r.Exists(ctx, key) { return ErrNotFound } - return os.RemoveAll(path.Dir(r.keyPath(key))) + return os.RemoveAll(filepath.Dir(r.keyPath(key))) } -func (r *GenericRawStorage) List(kind KindKey) ([]ObjectKey, error) { - // Validate GroupVersion first - if err := r.validateGroupVersion(kind); err != nil { - return nil, err - } - +func (r *GenericRawStorage) List(_ context.Context, kind KindKey) ([]ObjectKey, error) { // If the expected directory does not exist, just return an empty (nil) slice dir := r.kindKeyPath(kind) - if ok, fi := util.PathExists(dir); !ok { - return nil, nil - } else if !fi.IsDir() { - return nil, fmt.Errorf("expected that %s is a directory", dir) + + var keys []ObjectKey + if !r.isNamespaced(kind) { + // Names are listed in kindKeyPath + names, err := r.listNamesInDir(dir) + if err != nil { + return nil, err + } + for _, name := range names { + keys = append(keys, NewObjectKey(kind, NamespacedName{Name: name})) + } + return keys, nil } - // When we know that path is a directory, go ahead and read it - entries, err := ioutil.ReadDir(dir) + // Namespaces are listed in kindKeyPath + namespaces, err := readDir(dir) if err != nil { return nil, err } + for _, namespace := range namespaces { + // Names are listed in / + names, err := r.listNamesInDir(filepath.Join(dir, namespace)) + if err != nil { + 
return nil, err + } + for _, name := range names { + keys = append(keys, NewObjectKey(kind, NamespacedName{Name: name, Namespace: namespace})) + } + } - result := make([]ObjectKey, 0, len(entries)) - for _, entry := range entries { - result = append(result, NewObjectKey(kind, runtime.NewIdentifier(entry.Name()))) + return keys, nil +} + +func (r *GenericRawStorage) listNamesInDir(dir string) ([]string, error) { + entries, err := readDir(dir) + if err != nil { + return nil, err } - return result, nil + names := make([]string, 0, len(entries)) + for _, entry := range entries { + // Loop through all names, and make sure they are sanitized .metadata.name's + // If r.opts.SubDirectoryFileName != nil, the file names already match .metadata.name + if r.opts.SubDirectoryFileName != nil { + // TODO: We could add even stronger validation here + // Make sure the file /<.metadata.name>/. actually exists. + // It could be that only the .metadata.name directory exists, but not the file underneath. + expectedPath := filepath.Join(dir, entry, *r.opts.SubDirectoryFileName+r.ext) + if util.FileExists(expectedPath) { + names = append(names, entry) + } + continue + } + + // Storage path is ./.. entry is "." + // Verify the extension is there and strip it from name. 
If ext isn't there, just continue + if !strings.HasSuffix(entry, r.ext) { + continue + } + names = append(names, strings.TrimSuffix(entry, r.ext)) + } + return names, nil } // This returns the modification time as a UnixNano string // If the file doesn't exist, return ErrNotFound -func (r *GenericRawStorage) Checksum(key ObjectKey) (string, error) { - // Validate GroupVersion first - if err := r.validateGroupVersion(key); err != nil { - return "", err - } - +func (r *GenericRawStorage) Checksum(ctx context.Context, key ObjectKey) (string, error) { // Check if the resource indicated by key exists - if !r.Exists(key) { + if !r.Exists(ctx, key) { return "", ErrNotFound } return checksumFromModTime(r.keyPath(key)) } -func (r *GenericRawStorage) ContentType(_ ObjectKey) serializer.ContentType { +func (r *GenericRawStorage) ContentType(_ context.Context, _ ObjectKey) serializer.ContentType { return r.ct } @@ -193,7 +248,9 @@ func (r *GenericRawStorage) WatchDir() string { } func (r *GenericRawStorage) GetKey(p string) (ObjectKey, error) { - splitDir := strings.Split(filepath.Clean(r.dir), string(os.PathSeparator)) + /* TODO: Needs re-writing + + splitDir := strings.Split(filepath.Clean(r.opts.Directory), string(os.PathSeparator)) splitPath := strings.Split(filepath.Clean(p), string(os.PathSeparator)) if len(splitPath) < len(splitDir)+2 { @@ -213,7 +270,8 @@ func (r *GenericRawStorage) GetKey(p string) (ObjectKey, error) { Kind: kind, } - return NewObjectKey(NewKindKey(gvk), runtime.NewIdentifier(uid)), nil + return NewObjectKey(NewKindKey(gvk), runtime.NewIdentifier(uid)), nil*/ + return nil, errors.New("not implemented") } func checksumFromModTime(path string) (string, error) { @@ -224,3 +282,22 @@ func checksumFromModTime(path string) (string, error) { return strconv.FormatInt(fi.ModTime().UnixNano(), 10), nil } + +func readDir(dir string) ([]string, error) { + if ok, fi := util.PathExists(dir); !ok { + return nil, nil + } else if !fi.IsDir() { + return nil, 
fmt.Errorf("expected that %s is a directory", dir) + } + + // When we know that path is a directory, go ahead and read it + entries, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + fileNames := make([]string, 0, len(entries)) + for _, entry := range entries { + fileNames = append(fileNames, entry.Name()) + } + return fileNames, nil +} diff --git a/pkg/storage/rawstorage_options.go b/pkg/storage/rawstorage_options.go new file mode 100644 index 00000000..709dc94c --- /dev/null +++ b/pkg/storage/rawstorage_options.go @@ -0,0 +1,42 @@ +package storage + +import "github.com/weaveworks/libgitops/pkg/util" + +type GenericRawStorageOption interface { + ApplyToGenericRawStorage(*GenericRawStorageOptions) +} + +type GenericRawStorageOptions struct { + // SubDirectoryFileName specifies an alternate storage path form of + // /////. + // if non-empty + // +optional + SubDirectoryFileName *string + // DisableGroupDirectory can be set to true in order to not include the group + // in the file path, so that the storage path becomes: + // ///. 
+ // +optional + DisableGroupDirectory *bool +} + +func (o *GenericRawStorageOptions) ApplyToGenericRawStorage(target *GenericRawStorageOptions) { + if o.SubDirectoryFileName != nil { + target.SubDirectoryFileName = o.SubDirectoryFileName + } + if o.DisableGroupDirectory != nil { + target.DisableGroupDirectory = o.DisableGroupDirectory + } +} + +func (o *GenericRawStorageOptions) ApplyOptions(opts []GenericRawStorageOption) *GenericRawStorageOptions { + for _, opt := range opts { + opt.ApplyToGenericRawStorage(o) + } + return o +} + +type NoGroupDirectory bool + +func (d NoGroupDirectory) ApplyToGenericRawStorage(target *GenericRawStorageOptions) { + target.DisableGroupDirectory = util.BoolPtr(bool(d)) +} From 4ec5457adb3bb19d3656303c20a582d1633fef2e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 14 Jan 2021 15:44:11 +0200 Subject: [PATCH 022/149] Rewrite patchutil to only operate on bytes, and add support for different patch types. The "object patching" part has been moved to the serializer. 
--- pkg/util/patch/patch.go | 137 ++++++++++++++++------------------- pkg/util/patch/patch_test.go | 5 ++ 2 files changed, 66 insertions(+), 76 deletions(-) diff --git a/pkg/util/patch/patch.go b/pkg/util/patch/patch.go index 11c29ea8..535be559 100644 --- a/pkg/util/patch/patch.go +++ b/pkg/util/patch/patch.go @@ -1,103 +1,88 @@ package patch import ( - "bytes" + "encoding/json" "fmt" - "io/ioutil" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/serializer" - "k8s.io/apimachinery/pkg/runtime/schema" + jsonbytepatcher "github.com/evanphx/json-patch" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/strategicpatch" ) -type Patcher interface { - Create(new runtime.Object, applyFn func(runtime.Object) error) ([]byte, error) - Apply(original, patch []byte, gvk schema.GroupVersionKind) ([]byte, error) - ApplyOnFile(filePath string, patch []byte, gvk schema.GroupVersionKind) error +// BytePatcherForType returns the right BytePatcher for the given +// patch type. +// +// Note: if patchType is unknown, the return value will be nil, so make +// sure you check the BytePatcher is non-nil before using it! +func BytePatcherForType(patchType types.PatchType) BytePatcher { + switch patchType { + case types.JSONPatchType: + return JSONBytePatcher{} + case types.MergePatchType: + return MergeBytePatcher{} + case types.StrategicMergePatchType: + return StrategicMergeBytePatcher{} + default: + return nil + } } -func NewPatcher(s serializer.Serializer) Patcher { - return &patcher{serializer: s} -} +// maximum number of operations a single json patch may contain. 
+const maxJSONBytePatcherOperations = 10000 -type patcher struct { - serializer serializer.Serializer +type BytePatcher interface { + // TODO: SupportedType() types.PatchType + // currentData must be versioned bytes of the same GVK as into and patch.Data() (if merge patch) + // into must be an empty object + Apply(currentJSON, patchJSON []byte, schema strategicpatch.LookupPatchMeta) ([]byte, error) } -// Create is a helper that creates a patch out of the change made in applyFn -func (p *patcher) Create(new runtime.Object, applyFn func(runtime.Object) error) (patchBytes []byte, err error) { - var oldBytes, newBytes bytes.Buffer - encoder := p.serializer.Encoder() - old := new.DeepCopyObject().(runtime.Object) - - if err = encoder.Encode(serializer.NewJSONFrameWriter(&oldBytes), old); err != nil { - return - } - - if err = applyFn(new); err != nil { - return - } - - if err = encoder.Encode(serializer.NewJSONFrameWriter(&newBytes), new); err != nil { - return +type JSONBytePatcher struct{} + +func (JSONBytePatcher) Apply(currentJSON, patchJSON []byte, _ strategicpatch.LookupPatchMeta) ([]byte, error) { + // sanity check potentially abusive patches + // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) + // TODO(luxas): Go v1.15 has the above mentioned patch, what needs changing now? 
+ if len(patchJSON) > 1024*1024 { + v := []interface{}{} + if err := json.Unmarshal(patchJSON, &v); err != nil { + return nil, fmt.Errorf("error decoding patch: %v", err) + } } - emptyObj, err := p.serializer.Scheme().New(old.GetObjectKind().GroupVersionKind()) - if err != nil { - return - } - - patchBytes, err = strategicpatch.CreateTwoWayMergePatch(oldBytes.Bytes(), newBytes.Bytes(), emptyObj) - if err != nil { - return nil, fmt.Errorf("CreateTwoWayMergePatch failed: %v", err) - } - - return patchBytes, nil -} - -func (p *patcher) Apply(original, patch []byte, gvk schema.GroupVersionKind) ([]byte, error) { - emptyObj, err := p.serializer.Scheme().New(gvk) + patchObj, err := jsonbytepatcher.DecodePatch(patchJSON) if err != nil { return nil, err } - - b, err := strategicpatch.StrategicMergePatch(original, patch, emptyObj) - if err != nil { - return nil, err + if len(patchObj) > maxJSONBytePatcherOperations { + return nil, errors.NewRequestEntityTooLargeError( + fmt.Sprintf("The allowed maximum operations in a JSON patch is %d, got %d", + maxJSONBytePatcherOperations, len(patchObj))) } - - return p.serializerEncode(b) + return patchObj.Apply(currentJSON) } -func (p *patcher) ApplyOnFile(filePath string, patch []byte, gvk schema.GroupVersionKind) error { - oldContent, err := ioutil.ReadFile(filePath) - if err != nil { - return err - } - - newContent, err := p.Apply(oldContent, patch, gvk) - if err != nil { - return err +type MergeBytePatcher struct{} + +func (MergeBytePatcher) Apply(currentJSON, patchJSON []byte, _ strategicpatch.LookupPatchMeta) ([]byte, error) { + // sanity check potentially abusive patches + // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) + // TODO(luxas): Go v1.15 has the above mentioned patch, what needs changing now? 
+ if len(patchJSON) > 1024*1024 { + v := map[string]interface{}{} + if err := json.Unmarshal(patchJSON, &v); err != nil { + return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) + } } - return ioutil.WriteFile(filePath, newContent, 0644) + return jsonbytepatcher.MergePatch(currentJSON, patchJSON) } -// StrategicMergePatch returns an unindented, unorganized JSON byte slice, -// this helper takes that as an input and returns the same JSON re-encoded -// with the serializer so it conforms to a runtime.Object -// TODO: Just use encoding/json.Indent here instead? -func (p *patcher) serializerEncode(input []byte) ([]byte, error) { - obj, err := p.serializer.Decoder().Decode(serializer.NewJSONFrameReader(serializer.FromBytes(input))) - if err != nil { - return nil, err - } - - var result bytes.Buffer - if err := p.serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&result), obj); err != nil { - return nil, err - } +type StrategicMergeBytePatcher struct{} - return result.Bytes(), err +func (StrategicMergeBytePatcher) Apply(currentJSON, patchJSON []byte, schema strategicpatch.LookupPatchMeta) ([]byte, error) { + // TODO: Also check for overflow here? + // TODO: What to do when schema is nil? error? + return strategicpatch.StrategicMergePatchUsingLookupPatchMeta(currentJSON, patchJSON, schema) } diff --git a/pkg/util/patch/patch_test.go b/pkg/util/patch/patch_test.go index 9a3cf542..c9d1b01b 100644 --- a/pkg/util/patch/patch_test.go +++ b/pkg/util/patch/patch_test.go @@ -1,5 +1,9 @@ package patch +/* + +TODO: Create good unit tests for this package! 
+ import ( "bytes" "testing" @@ -58,3 +62,4 @@ func TestApplyPatch(t *testing.T) { t.Fatal(err) } } +*/ From 173c31a1805dd9053c216b8fa4b83b31bb454f8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 14 Jan 2021 15:45:23 +0200 Subject: [PATCH 023/149] Rewrite the key system; import most things from pkg/client --- pkg/storage/key.go | 74 +++++++++++++++++++--------------------------- 1 file changed, 31 insertions(+), 43 deletions(-) diff --git a/pkg/storage/key.go b/pkg/storage/key.go index 015cac41..fb4c0ab0 100644 --- a/pkg/storage/key.go +++ b/pkg/storage/key.go @@ -1,64 +1,52 @@ package storage import ( - "github.com/weaveworks/libgitops/pkg/runtime" + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" ) -type kindKey schema.GroupVersionKind - -func (gvk kindKey) GetGroup() string { return gvk.Group } -func (gvk kindKey) GetVersion() string { return gvk.Version } -func (gvk kindKey) GetKind() string { return gvk.Kind } -func (gvk kindKey) GetGVK() schema.GroupVersionKind { return schema.GroupVersionKind(gvk) } -func (gvk kindKey) EqualsGVK(kind KindKey, respectVersion bool) bool { - // Make sure kind and group match, otherwise return false - if gvk.GetKind() != kind.GetKind() || gvk.GetGroup() != kind.GetGroup() { - return false - } - // If we allow version mismatches (i.e. don't need to respect the version), return true - if !respectVersion { - return true - } - // Otherwise, return true if the version also is the same - return gvk.GetVersion() == kind.GetVersion() -} -func (gvk kindKey) String() string { return gvk.GetGVK().String() } - -// kindKey implements KindKey. 
-var _ KindKey = kindKey{} +// Aliases +type Object = client.Object +type ObjectList = client.ObjectList +type KindKey = schema.GroupVersionKind +type NamespacedName = types.NamespacedName +type Patch = client.Patch -type KindKey interface { - // String implements fmt.Stringer - String() string - - GetGroup() string - GetVersion() string - GetKind() string - GetGVK() schema.GroupVersionKind - - EqualsGVK(kind KindKey, respectVersion bool) bool -} +var ErrNoMetadata = errors.New("it is required to embed ObjectMeta into the serialized API type") type ObjectKey interface { - KindKey - runtime.Identifyable + Kind() KindKey + NamespacedName() NamespacedName } // objectKey implements ObjectKey. var _ ObjectKey = &objectKey{} type objectKey struct { - KindKey - runtime.Identifyable + kind KindKey + name NamespacedName } -func (key objectKey) String() string { return key.KindKey.String() + " " + key.GetIdentifier() } +func (key objectKey) Kind() KindKey { return key.kind } +func (key objectKey) NamespacedName() NamespacedName { return key.name } -func NewKindKey(gvk schema.GroupVersionKind) KindKey { - return kindKey(gvk) +func NewObjectKey(kind KindKey, name NamespacedName) ObjectKey { + return objectKey{kind, name} } -func NewObjectKey(kind KindKey, id runtime.Identifyable) ObjectKey { - return objectKey{kind, id} +func NewObjectForGVK(kind KindKey, scheme *runtime.Scheme) (Object, error) { + kobj, err := scheme.New(kind) + if err != nil { + return nil, err + } + obj, ok := kobj.(Object) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrNoMetadata, kind) + } + return obj, nil } From 97db70af3a1d758649e626f10f6679aff82b2087 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 14 Jan 2021 15:45:56 +0200 Subject: [PATCH 024/149] First snapshot of the storage system rewrite. 
--- pkg/storage/options.go | 75 ++++++ pkg/storage/storage.go | 576 +++++++++++++++++++++++------------------ 2 files changed, 396 insertions(+), 255 deletions(-) create mode 100644 pkg/storage/options.go diff --git a/pkg/storage/options.go b/pkg/storage/options.go new file mode 100644 index 00000000..6768a0a9 --- /dev/null +++ b/pkg/storage/options.go @@ -0,0 +1,75 @@ +package storage + +import ( + "github.com/weaveworks/libgitops/pkg/filter" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ListOption interface { + client.ListOption + filter.FilterOption +} + +type ListOptions struct { + client.ListOptions + filter.FilterOptions +} + +var _ ListOption = &ListOptions{} + +func (o *ListOptions) ApplyToList(target *client.ListOptions) { + o.ListOptions.ApplyToList(target) +} + +func (o *ListOptions) ApplyToFilterOptions(target *filter.FilterOptions) { + o.FilterOptions.ApplyToFilterOptions(target) +} + +func (o *ListOptions) ApplyOptions(opts []client.ListOption) *ListOptions { + // Apply the "normal" ListOptions + o.ListOptions.ApplyOptions(opts) + // Apply all FilterOptions, if they implement that interface + for _, opt := range opts { + o.FilterOptions.ApplyOption(opt) + } + + // If listOpts.Namespace was given, add it to the list of ObjectFilters + if len(o.Namespace) != 0 { + o.ObjectFilters = append(o.ObjectFilters, filter.NamespaceFilter{Namespace: o.Namespace}) + } + // If listOpts.LabelSelector was given, add it to the list of ObjectFilters + if o.LabelSelector != nil { + o.ObjectFilters = append(o.ObjectFilters, filter.LabelsFilter{LabelSelector: o.LabelSelector}) + } + + return o +} + +type DeleteAllOfOption interface { + ListOption + client.DeleteAllOfOption +} + +type DeleteAllOfOptions struct { + ListOptions + client.DeleteOptions +} + +var _ DeleteAllOfOption = &DeleteAllOfOptions{} + +func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(target *client.DeleteAllOfOptions) { + o.DeleteOptions.ApplyToDelete(&target.DeleteOptions) +} + +func (o 
*DeleteAllOfOptions) ApplyOptions(opts []client.DeleteAllOfOption) *DeleteAllOfOptions { + // Cannot directly apply to o, hence, create a temporary object to which upstream opts are applied + do := (&client.DeleteAllOfOptions{}).ApplyOptions(opts) + o.ListOptions.ListOptions = do.ListOptions + o.DeleteOptions = do.DeleteOptions + + // Apply all FilterOptions, if they implement that interface + for _, opt := range opts { + o.FilterOptions.ApplyOption(opt) + } + return o +} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 6e75c047..2e293416 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -2,76 +2,69 @@ package storage import ( "bytes" + "context" "errors" "fmt" "io" + "sync" - "github.com/sirupsen/logrus" + "github.com/fluxcd/go-git-providers/validation" "github.com/weaveworks/libgitops/pkg/filter" "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/serializer" patchutil "github.com/weaveworks/libgitops/pkg/util/patch" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" kruntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/yaml" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" ) +type NewObjectFunc func() (Object, error) + var ( + // TODO: Return the same errors as k8s does // ErrAmbiguousFind is returned when the user requested one object from a List+Filter process. ErrAmbiguousFind = errors.New("two or more results were aquired when one was expected") // ErrNotFound is returned when the requested resource wasn't found. ErrNotFound = errors.New("resource not found") // ErrAlreadyExists is returned when when WriteStorage.Create is called for an already stored object. 
ErrAlreadyExists = errors.New("resource already exists") + // ErrCannotSaveMetadata is returned if the user tries to save metadata-only objects + ErrCannotSaveMetadata = errors.New("cannot save (Create|Update|Patch) *metav1.PartialObjectMetadata") + // ErrNameRequired is returned when .metadata.name is unset + // TODO: Support generateName? + ErrNameRequired = errors.New(".metadata.name is required") + // ErrUnsupportedPatchType is returned when an unsupported patch type is used + ErrUnsupportedPatchType = errors.New("unsupported patch type") ) -type ReadStorage interface { - // Get returns a new Object for the resource at the specified kind/uid path, based on the file content. - // If the resource referred to by the given ObjectKey does not exist, Get returns ErrNotFound. - Get(key ObjectKey) (runtime.Object, error) - - // List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package - // for more information, e.g. filter.NameFilter{} and filter.UIDFilter{}) - List(kind KindKey, opts ...filter.ListOption) ([]runtime.Object, error) - - // Find does a List underneath, also using filters, but always returns one object. If the List - // underneath returned two or more results, ErrAmbiguousFind is returned. If no match was found, - // ErrNotFound is returned. - Find(kind KindKey, opts ...filter.ListOption) (runtime.Object, error) - - // - // Partial object getters. - // TODO: Figure out what we should do with these, do we need them and if so where? - // +const ( + namespaceListKind = "NamespaceList" +) - // GetMeta returns a new Object's APIType representation for the resource at the specified kind/uid path. - // If the resource referred to by the given ObjectKey does not exist, GetMeta returns ErrNotFound. - GetMeta(key ObjectKey) (runtime.PartialObject, error) - // ListMeta lists all Objects' APIType representation. In other words, - // only metadata about each Object is unmarshalled (uid/name/kind/apiVersion). 
- // This allows for faster runs (no need to unmarshal "the world"), and less - // resource usage, when only metadata is unmarshalled into memory - ListMeta(kind KindKey) ([]runtime.PartialObject, error) +var v1GroupKind = schema.GroupVersion{Group: "", Version: "v1"} - // - // Cache-related methods. - // +type ObjectID interface { + GroupVersionKind() schema.GroupVersionKind + GetName() string + GetNamespace() string + GetLabels() map[string]string +} - // Checksum returns a string representing the state of an Object on disk - // The checksum should change if any modifications have been made to the - // Object on disk, it can be e.g. the Object's modification timestamp or - // calculated checksum. If the Object is not found, ErrNotFound is returned. - Checksum(key ObjectKey) (string, error) - // Count returns the amount of available Objects of a specific kind - // This is used by Caches to check if all Objects are cached to perform a List - Count(kind KindKey) (uint64, error) +func foo() { + var _ ObjectID = &metav1.PartialObjectMetadata{} +} +type CommonStorage interface { // // Access to underlying Resources. // // RawStorage returns the RawStorage instance backing this Storage + // It is expected that RawStorage only operates on one "frame" at a time in its Read/Write operations. RawStorage() RawStorage // Serializer returns the serializer Serializer() serializer.Serializer @@ -80,24 +73,26 @@ type ReadStorage interface { // Misc methods. // - // ObjectKeyFor returns the ObjectKey for the given object - ObjectKeyFor(obj runtime.Object) (ObjectKey, error) // Close closes all underlying resources (e.g. goroutines) used; before the application exits Close() error } +// ReadStorage TODO +type ReadStorage interface { + CommonStorage + + client.Reader + // TODO: In the future to support indexing "custom" fields. 
+ // Normal fields (not counting arrays) could be supported using + // kruntime.DefaultUnstructuredConverter.ToUnstructured() in + // filter.FieldFilter + // client.FieldIndexer +} + type WriteStorage interface { - // Create creates an entry for and stores the given Object in the storage. The Object must be new to the storage. - // The ObjectMeta.CreationTimestamp field is set automatically to the current time if it is unset. - Create(obj runtime.Object) error - // Update updates the state of the given Object in the storage. The Object must exist in the storage. - // The ObjectMeta.CreationTimestamp field is set automatically to the current time if it is unset. - Update(obj runtime.Object) error - - // Patch performs a strategic merge patch on the Object with the given UID, using the byte-encoded patch given - Patch(key ObjectKey, patch []byte) error - // Delete removes an Object from the storage - Delete(key ObjectKey) error + CommonStorage + client.Writer + //client.StatusClient } // Storage is an interface for persisting and retrieving API objects to/from a backend @@ -105,19 +100,19 @@ type WriteStorage interface { type Storage interface { ReadStorage WriteStorage + //client.Client } // NewGenericStorage constructs a new Storage -func NewGenericStorage(rawStorage RawStorage, serializer serializer.Serializer, identifiers []runtime.IdentifierFactory) Storage { - return &GenericStorage{rawStorage, serializer, patchutil.NewPatcher(serializer), identifiers} +func NewGenericStorage(rawStorage RawStorage, serializer serializer.Serializer, enforcer NamespaceEnforcer) Storage { + return &GenericStorage{rawStorage, serializer, enforcer} } // GenericStorage implements the Storage interface type GenericStorage struct { - raw RawStorage - serializer serializer.Serializer - patcher patchutil.Patcher - identifiers []runtime.IdentifierFactory + raw RawStorage + serializer serializer.Serializer + enforcer NamespaceEnforcer } var _ Storage = &GenericStorage{} @@ -126,215 
+121,282 @@ func (s *GenericStorage) Serializer() serializer.Serializer { return s.serializer } -// Get returns a new Object for the resource at the specified kind/uid path, based on the file content -func (s *GenericStorage) Get(key ObjectKey) (runtime.Object, error) { - content, err := s.raw.Read(key) +// Get returns a new Object for the resource at the specified kind/uid path, based on the file content. +// In order to only extract the metadata of this object, pass in a *metav1.PartialObjectMetadata +func (s *GenericStorage) Get(ctx context.Context, name NamespacedName, obj Object) error { + gvk, err := serializer.GVKForObject(s.serializer.Scheme(), obj) if err != nil { - return nil, err + return err } - return s.decode(key, content) -} - -// TODO: Verify this works -// GetMeta returns a new Object's APIType representation for the resource at the specified kind/uid path -func (s *GenericStorage) GetMeta(key ObjectKey) (runtime.PartialObject, error) { - content, err := s.raw.Read(key) + key := NewObjectKey(gvk, name) + content, err := s.raw.Read(ctx, key) if err != nil { - return nil, err + return err } - return s.decodeMeta(key, content) + ct := s.raw.ContentType(ctx, key) + // TODO: Support various decoding options, e.g. defaulting? + return s.serializer.Decoder().DecodeInto(serializer.NewSingleFrameReader(content, ct), obj) } -// TODO: Make sure we don't save a partial object -func (s *GenericStorage) write(key ObjectKey, obj runtime.Object) error { - // Set the content type based on the format given by the RawStorage, but default to JSON - contentType := serializer.ContentTypeJSON - if ct := s.raw.ContentType(key); len(ct) != 0 { - contentType = ct - } - - // Set creationTimestamp if not already populated - t := obj.GetCreationTimestamp() - if t.IsZero() { - obj.SetCreationTimestamp(metav1.Now()) +// List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package +// for more information, e.g. 
filter.NameFilter{} and filter.UIDFilter{}) +// You can also pass in an *unstructured.UnstructuredList to get an unknown type's data or +// *metav1.PartialObjectMetadataList to just get the metadata of all objects of the specified gvk. +// If you do specify either an *unstructured.UnstructuredList or *metav1.PartialObjectMetadataList, +// you need to populate TypeMeta with the GVK you want back. +// TODO: Check if this works with metav1.List{} +func (s *GenericStorage) List(ctx context.Context, list ObjectList, opts ...client.ListOption) error { + // This call will verify that list actually is a List type. + gvk, err := serializer.GVKForList(list, s.serializer.Scheme()) + if err != nil { + return err } + // This applies both upstream and custom options + listOpts := (&ListOptions{}).ApplyOptions(opts) - var objBytes bytes.Buffer - err := s.serializer.Encoder().Encode(serializer.NewFrameWriter(contentType, &objBytes), obj) + // Do an internal list to get all objects + keys, err := s.raw.List(ctx, gvk) if err != nil { return err } - return s.raw.Write(key, objBytes.Bytes()) + ch := make(chan Object, len(keys)) // TODO: This could be less + wg := &sync.WaitGroup{} + wg.Add(1) + var processErr error + go func() { + createFunc := createObject(gvk, s.serializer.Scheme()) + if serializer.IsPartialObjectList(list) { + createFunc = createPartialObject(gvk) + } else if serializer.IsUnstructuredList(list) { + createFunc = createUnstructuredObject(gvk) + } + processErr = s.processKeys(ctx, keys, &listOpts.FilterOptions, createFunc, ch) + wg.Done() + }() + + objs := make([]kruntime.Object, 0, len(keys)) + for o := range ch { + objs = append(objs, o) + } + // Wait for processErr to be set, and the above goroutine to finish + wg.Wait() + if processErr != nil { + return processErr + } + + // Populate the List's Items field with the objects returned + meta.SetList(list, objs) + return nil } -func (s *GenericStorage) Create(obj runtime.Object) error { - key, err := 
s.ObjectKeyFor(obj) +func (s *GenericStorage) Create(ctx context.Context, obj Object, _ ...client.CreateOption) error { + // We must never save metadata-only structs + if serializer.IsPartialObject(obj) { + return ErrCannotSaveMetadata + } + + key, err := s.objectKeyForObj(ctx, obj) if err != nil { - return err + return nil } - if s.raw.Exists(key) { + if s.raw.Exists(ctx, key) { return ErrAlreadyExists } // The object was not found so we can safely create it - return s.write(key, obj) + return s.write(ctx, key, obj) } -func (s *GenericStorage) Update(obj runtime.Object) error { - key, err := s.ObjectKeyFor(obj) +// Note: This should also work for unstructured and partial metadata objects +func (s *GenericStorage) objectKeyForObj(ctx context.Context, obj Object) (ObjectKey, error) { + gvk, err := serializer.GVKForObject(s.serializer.Scheme(), obj) if err != nil { - return err + return nil, err } - if !s.raw.Exists(key) { - return ErrNotFound + // Object must always have .metadata.name set + if len(obj.GetName()) == 0 { + return nil, ErrNameRequired } - // The object was found so we can safely update it - return s.write(key, obj) -} - -// Patch performs a strategic merge patch on the object with the given UID, using the byte-encoded patch given -func (s *GenericStorage) Patch(key ObjectKey, patch []byte) error { - oldContent, err := s.raw.Read(key) + // Check if the GroupKind is namespaced + namespaced, err := s.raw.Namespacer().IsNamespaced(gvk.GroupKind()) if err != nil { - return err + return nil, err } - // TODO: This is a bit of a hack, but for now this works. The patcher expects only JSON, hence - // we need to handle the case when raw.Read doesn't return JSON bytes. In the future however, this - // logic should probably be rewritten completely. 
- if s.raw.ContentType(key) == serializer.ContentTypeYAML { - oldContent, err = yaml.YAMLToJSONStrict(oldContent) - if err != nil { - return err + var namespaces sets.String + // If the namespace enforcer requires listing all the other namespaces, + // look them up + if s.enforcer.RequireNamespaceExists() { + nsList := &metav1.PartialObjectMetadataList{} + nsList.SetGroupVersionKind(v1GroupKind.WithKind(namespaceListKind)) + if err := s.List(ctx, nsList); err != nil { + return nil, err + } + namespaces = sets.NewString() + for _, ns := range nsList.Items { + namespaces.Insert(ns.GetName()) } } - - newContent, err := s.patcher.Apply(oldContent, patch, key.GetGVK()) - if err != nil { - return err + // Enforce the given namespace policy. This might mutate obj + if err := s.enforcer.EnforceNamespace(obj, namespaced, namespaces); err != nil { + return nil, err } - return s.raw.Write(key, newContent) + // At this point we know name is non-empty, and the namespace field is correct, + // according to policy + return NewObjectKey(gvk, NamespacedName{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + }), nil } -// Delete removes an Object from the storage -func (s *GenericStorage) Delete(key ObjectKey) error { - return s.raw.Delete(key) -} +func (s *GenericStorage) Update(ctx context.Context, obj Object, _ ...client.UpdateOption) error { + // We must never save metadata-only structs + if serializer.IsPartialObject(obj) { + return ErrCannotSaveMetadata + } + + key, err := s.objectKeyForObj(ctx, obj) + if err != nil { + return nil + } -// Checksum returns a string representing the state of an Object on disk -func (s *GenericStorage) Checksum(key ObjectKey) (string, error) { - return s.raw.Checksum(key) + return s.update(ctx, obj, key) } -func (s *GenericStorage) list(kind KindKey) (result []runtime.Object, walkerr error) { - walkerr = s.walkKind(kind, func(key ObjectKey, content []byte) error { - obj, err := s.decode(key, content) - if err != nil { - return err - } 
+func (s *GenericStorage) update(ctx context.Context, obj Object, key ObjectKey) error { + if !s.raw.Exists(ctx, key) { + return ErrNotFound + } - result = append(result, obj) - return nil - }) - return + // TODO: Validation? + + // The object was found so we can safely update it + return s.write(ctx, key, obj) } -// List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package -// for more information, e.g. filter.NameFilter{} and filter.UIDFilter{}) -func (s *GenericStorage) List(kind KindKey, opts ...filter.ListOption) ([]runtime.Object, error) { - // First, complete the options struct - o, err := filter.MakeListOptions(opts...) +// Patch performs a strategic merge patch on the object with the given UID, using the byte-encoded patch given +func (s *GenericStorage) Patch(ctx context.Context, obj Object, patch Patch, _ ...client.PatchOption) error { + // We must never save metadata-only structs + if serializer.IsPartialObject(obj) { + return ErrCannotSaveMetadata + } + + // Acquire the patch data from the "desired state" object given now, i.e. in MergeFrom{} + // TODO: Shall we require GVK to be present here using a meta interpreter? + patchJSON, err := patch.Data(obj) if err != nil { - return nil, err + return err } - // Do an internal list to get all objects - objs, err := s.list(kind) + // Get the object key for obj, this validates GVK, name and namespace + // We need to do this before Get to be consistent with Update & Delete + key, err := s.objectKeyForObj(ctx, obj) if err != nil { - return nil, err + return err } - // For all list filters, pipe the output of the previous as the input to the next, in order. - for _, filter := range o.Filters { - objs, err = filter.Filter(objs...) 
- if err != nil { - return nil, err - } + // Load the current latest state into obj temporarily, before patching it + if err := s.Get(ctx, key.NamespacedName(), obj); err != nil { + return err } - return objs, nil + + // Get the right BytePatcher for this patch type + bytePatcher := patchutil.BytePatcherForType(patch.Type()) + if bytePatcher == nil { + return fmt.Errorf("patch type not supported: %s", patch.Type()) + } + + // Apply the patch into the object using the given byte patcher + if unstruct, ok := obj.(kruntime.Unstructured); ok { + // TODO: Provide an option for the schema + err = s.serializer.Patcher().ApplyOnUnstructured(bytePatcher, patchJSON, unstruct, nil) + } else { + err = s.serializer.Patcher().ApplyOnStruct(bytePatcher, patchJSON, obj) + } + if err != nil { + return err + } + + // Perform an update internally, similar to what .Update would yield + // TODO: Maybe write to storage conditionally? + return s.update(ctx, obj, key) } -// Find does a List underneath, also using filters, but always returns one object. If the List -// underneath returned two or more results, ErrAmbiguousFind is returned. If no match was found, -// ErrNotFound is returned. -func (s *GenericStorage) Find(kind KindKey, opts ...filter.ListOption) (runtime.Object, error) { - // Do a normal list underneath - objs, err := s.List(kind, opts...) +// Delete removes an Object from the storage +// PartialObjectMetadata should work here. 
+func (s *GenericStorage) Delete(ctx context.Context, obj Object, _ ...client.DeleteOption) error { + // Get the key for the object + key, err := s.objectKeyForObj(ctx, obj) if err != nil { - return nil, err + return err } - // Return based on the object count - switch l := len(objs); l { - case 0: - return nil, fmt.Errorf("no Find match found: %w", ErrNotFound) - case 1: - return objs[0], nil - default: - return nil, fmt.Errorf("too many (%d) matches: %v: %w", l, objs, ErrAmbiguousFind) + + // Verify it did exist + if !s.raw.Exists(ctx, key) { + return ErrNotFound } + + // Delete it from the underlying storage + return s.raw.Delete(ctx, key) } -// ListMeta lists all Objects' APIType representation. In other words, -// only metadata about each Object is unmarshalled (uid/name/kind/apiVersion). -// This allows for faster runs (no need to unmarshal "the world"), and less -// resource usage, when only metadata is unmarshalled into memory -func (s *GenericStorage) ListMeta(kind KindKey) (result []runtime.PartialObject, walkerr error) { - walkerr = s.walkKind(kind, func(key ObjectKey, content []byte) error { +// DeleteAllOf deletes all matched resources by first doing a List() operation on the given GVK of +// obj (obj is not used for anything else) and the given filters in opts. Only the Partial Meta +func (s *GenericStorage) DeleteAllOf(ctx context.Context, obj Object, opts ...client.DeleteAllOfOption) error { + // This applies both upstream and custom options, and propagates the options correctly to both + // List() and Delete() + customDeleteAllOpts := (&DeleteAllOfOptions{}).ApplyOptions(opts) - obj, err := s.decodeMeta(key, content) - if err != nil { - return err - } + // Get the GVK of the object + gvk, err := serializer.GVKForObject(s.serializer.Scheme(), obj) + if err != nil { + return err + } - result = append(result, obj) - return nil - }) - return -} + // List all matched objects for the given ListOptions, and GVK. 
+ // UnstructuredList is used here so that we can use filters that operate on fields + list := &unstructured.UnstructuredList{} + list.SetGroupVersionKind(gvk) + if err := s.List(ctx, list, customDeleteAllOpts); err != nil { + return err + } -// Count counts the Objects for the specific kind -func (s *GenericStorage) Count(kind KindKey) (uint64, error) { - entries, err := s.raw.List(kind) - return uint64(len(entries)), err + // Loop through all of the matched items, and Delete them one-by-one + for i := range list.Items { + if err := s.Delete(ctx, &list.Items[i], customDeleteAllOpts); err != nil { + return err + } + } + return nil } -func (s *GenericStorage) ObjectKeyFor(obj runtime.Object) (ObjectKey, error) { - var gvk schema.GroupVersionKind - var err error +func (s *GenericStorage) write(ctx context.Context, key ObjectKey, obj Object) error { + // Set the content type based on the format given by the RawStorage, but default to JSON + contentType := serializer.ContentTypeJSON + if ct := s.raw.ContentType(ctx, key); len(ct) != 0 { + contentType = ct + } - _, isPartialObject := obj.(runtime.PartialObject) - if isPartialObject { - gvk = obj.GetObjectKind().GroupVersionKind() - // TODO: Error if empty - } else { - gvk, err = serializer.GVKForObject(s.serializer.Scheme(), obj) - if err != nil { - return nil, err - } + // Set creationTimestamp if not already populated + t := obj.GetCreationTimestamp() + if t.IsZero() { + obj.SetCreationTimestamp(metav1.Now()) } - id := s.identify(obj) - if id == nil { - return nil, fmt.Errorf("couldn't identify object") + var objBytes bytes.Buffer + err := s.serializer.Encoder().Encode(serializer.NewFrameWriter(contentType, &objBytes), obj) + if err != nil { + return err } - return NewObjectKey(NewKindKey(gvk), id), nil + + return s.raw.Write(ctx, key, objBytes.Bytes()) } // RawStorage returns the RawStorage instance backing this Storage @@ -347,82 +409,86 @@ func (s *GenericStorage) Close() error { return nil // nothing to do here 
for GenericStorage } -// identify loops through the identifiers, in priority order, to identify the object correctly -func (s *GenericStorage) identify(obj runtime.Object) runtime.Identifyable { - for _, identifier := range s.identifiers { +// Scheme returns the scheme this client is using. +func (s *GenericStorage) Scheme() *kruntime.Scheme { + return s.serializer.Scheme() +} - id, ok := identifier.Identify(obj) - if ok { - return id - } - } +// RESTMapper returns the rest this client is using. For now, this returns nil, so don't use. +func (s *GenericStorage) RESTMapper() meta.RESTMapper { return nil } -func (s *GenericStorage) decode(key ObjectKey, content []byte) (runtime.Object, error) { - gvk := key.GetGVK() - // Decode the bytes to the internal version of the Object, if desired - isInternal := gvk.Version == kruntime.APIVersionInternal - - // Decode the bytes into an Object - ct := s.raw.ContentType(key) - logrus.Infof("Decoding with content type %s", ct) - obj, err := s.serializer.Decoder( - serializer.WithConvertToHubDecode(isInternal), - ).Decode(serializer.NewFrameReader(ct, serializer.FromBytes(content))) - if err != nil { - return nil, err +func createObject(gvk KindKey, scheme *kruntime.Scheme) NewObjectFunc { + return func() (Object, error) { + return NewObjectForGVK(gvk, scheme) } +} - // Cast to runtime.Object, and make sure it works - metaObj, ok := obj.(runtime.Object) - if !ok { - return nil, fmt.Errorf("can't convert to libgitops.runtime.Object") +func createPartialObject(gvk KindKey) NewObjectFunc { + return func() (Object, error) { + obj := &metav1.PartialObjectMetadata{} + obj.SetGroupVersionKind(gvk) + return obj, nil } - - // Set the desired gvk of this Object from the caller - metaObj.GetObjectKind().SetGroupVersionKind(gvk) - return metaObj, nil } -func (s *GenericStorage) decodeMeta(key ObjectKey, content []byte) (runtime.PartialObject, error) { - gvk := key.GetGVK() - partobjs, err := DecodePartialObjects(serializer.FromBytes(content), 
s.serializer.Scheme(), false, &gvk) - if err != nil { - return nil, err +func createUnstructuredObject(gvk KindKey) NewObjectFunc { + return func() (Object, error) { + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(gvk) + return obj, nil } - - return partobjs[0], nil } -func (s *GenericStorage) walkKind(kind KindKey, fn func(key ObjectKey, content []byte) error) error { - keys, err := s.raw.List(kind) - if err != nil { - return err +func (s *GenericStorage) processKeys(ctx context.Context, keys []ObjectKey, filterOpts *filter.FilterOptions, fn NewObjectFunc, output chan Object) error { + wg := &sync.WaitGroup{} + wg.Add(len(keys)) + multiErr := &validation.MultiError{} // TODO: Thread-safe append + for _, k := range keys { + go func(key ObjectKey) { + defer wg.Done() + + // Create a new object, and decode into it using Get + obj, err := fn() + if err != nil { + multiErr.Errors = append(multiErr.Errors, err) + return + } + + if err := s.Get(ctx, key.NamespacedName(), obj); err != nil { + multiErr.Errors = append(multiErr.Errors, err) + return + } + + // Match the object against the filters + matched, err := filterOpts.Match(obj) + if err != nil { + multiErr.Errors = append(multiErr.Errors, err) + return + } + if !matched { + return + } + + output <- obj + }(k) } + wg.Wait() + // Close the output channel so that the for-range loop stops + close(output) - for _, key := range keys { - // Allow metadata.json to not exist, although the directory does exist - if !s.raw.Exists(key) { - continue - } - - content, err := s.raw.Read(key) - if err != nil { - return err - } - - if err := fn(key, content); err != nil { - return err - } + // TODO: upstream this + if len(multiErr.Errors) != 0 { + return multiErr } - return nil } // DecodePartialObjects reads any set of frames from the given ReadCloser, decodes the frames into // PartialObjects, validates that the decoded objects are known to the scheme, and optionally sets a default -// group +// group. 
+// TODO: Is this call relevant in the future? func DecodePartialObjects(rc io.ReadCloser, scheme *kruntime.Scheme, allowMultiple bool, defaultGVK *schema.GroupVersionKind) ([]runtime.PartialObject, error) { fr := serializer.NewYAMLFrameReader(rc) From d1f6dc02e7b9dbd89755f2427919423d02846d0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 18 Jan 2021 23:30:12 +0200 Subject: [PATCH 025/149] Check in all the really core interfaces of the storage system --- pkg/storage/core/interfaces.go | 101 +++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 pkg/storage/core/interfaces.go diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go new file mode 100644 index 00000000..a60670e1 --- /dev/null +++ b/pkg/storage/core/interfaces.go @@ -0,0 +1,101 @@ +package core + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Note: package core must not depend on any other parts of the libgitops repo, possibly the serializer package as an exception. +// Anything under k8s.io/apimachinery goes though, and important external imports +// like github.com/spf13/afero is also ok. The pretty large sigs.k8s.io/controller-runtime +// import is a bit sub-optimal, though. + +// GroupVersionKind aliases +type GroupKind = schema.GroupKind +type GroupVersionKind = schema.GroupVersionKind + +// Client-related Object aliases +type Object = client.Object +type ObjectKey = types.NamespacedName +type ObjectList = client.ObjectList +type Patch = client.Patch + +// Client-related Option aliases +type ListOption = client.ListOption +type CreateOption = client.CreateOption +type UpdateOption = client.UpdateOption +type PatchOption = client.PatchOption +type DeleteOption = client.DeleteOption +type DeleteAllOfOption = client.DeleteAllOfOption + +// Helper functions from client. 
+var ObjectKeyFromObject = client.ObjectKeyFromObject
+
+// NamespaceEnforcer enforces a namespace policy for the Storage.
+type NamespaceEnforcer interface {
+	// RequireSetNamespaceExists specifies whether the namespace must exist in the system.
+	// For example, Kubernetes requires this by default.
+	RequireSetNamespaceExists() bool
+	// EnforceNamespace operates on the object to make it conform with a given set of rules.
+	// If RequireSetNamespaceExists() is true, all the namespaces available in the system must
+	// be passed to namespaces.
+	// For example, Kubernetes enforces the following rules:
+	// Namespaced resources:
+	// If .metadata.namespace == "": .metadata.namespace = "default"
+	// If .metadata.namespace != "": Make sure there is such a namespace, and use it in that case
+	// Non-namespaced resources:
+	// If .metadata.namespace != "": .metadata.namespace = ""
+	EnforceNamespace(obj Object, namespaced bool, namespaces sets.String) error
+}
+
+// Namespacer is an interface that lets the caller know if a GroupKind is namespaced
+// or not. There are two ready-made implementations:
+// 1. RESTMapperToNamespacer
+// 2. NewStaticNamespacer
+type Namespacer interface {
+	// IsNamespaced returns true if the GroupKind is a namespaced type
+	IsNamespaced(gk schema.GroupKind) (bool, error)
+}
+
+// TODO: Investigate if the ObjectRecognizer should return unversioned
+// or versioned ObjectID's
+type ObjectRecognizer interface {
+	ResolveObjectID(ctx context.Context, fileName string, content []byte) (ObjectID, error)
+}
+
+// UnversionedObjectID represents an ID for an Object whose version is not known.
+// However, the Group, Kind, Name and, optionally, Namespace are known and should
+// uniquely identify the Object at a specific moment in time.
+type UnversionedObjectID interface {
+	GroupKind() GroupKind
+	ObjectKey() ObjectKey
+
+	WithVersion(version string) ObjectID
+}
+
+// ObjectID is a superset of UnversionedObjectID, that also specifies an exact version.
+type ObjectID interface { + UnversionedObjectID + + GroupVersionKind() GroupVersionKind +} + +type VersionRef interface { + IsZero() bool + String() string + Type() VersionRefType +} + +// VersionRefType specifies if the VersionRef is a commit (i.e. a read-only snapshot), or +// a writable branch. The terminology here is similar to that of Git, so people feel familiar +// with the concepts, but there is not requirement to use Git. +type VersionRefType int + +const ( + VersionRefTypeCommit VersionRefType = 1 + iota + VersionRefTypeBranch +) From f50276b7b8de15dd8cdd223348aa107b1e14cf7b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 18 Jan 2021 23:31:54 +0200 Subject: [PATCH 026/149] Check in a filesystem abstraction layer that will be used across the system. --- pkg/storage/core/afero.go | 78 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 pkg/storage/core/afero.go diff --git a/pkg/storage/core/afero.go b/pkg/storage/core/afero.go new file mode 100644 index 00000000..68229e1d --- /dev/null +++ b/pkg/storage/core/afero.go @@ -0,0 +1,78 @@ +package core + +import ( + "context" + "os" + "path/filepath" + + "github.com/spf13/afero" +) + +// AferoContext extends afero.Fs and afero.Afero with contexts added to every method. +type AferoContext interface { + // Members of afero.Fs + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(ctx context.Context, path string, perm os.FileMode) error + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(ctx context.Context, name string) error + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. 
+ Stat(ctx context.Context, name string) (os.FileInfo, error) + + // Members of afero.Afero + + ReadDir(ctx context.Context, dirname string) ([]os.FileInfo, error) + + Exists(ctx context.Context, path string) (bool, error) + + ReadFile(ctx context.Context, filename string) ([]byte, error) + + WriteFile(ctx context.Context, filename string, data []byte, perm os.FileMode) error + + Walk(ctx context.Context, root string, walkFn filepath.WalkFunc) error +} + +// AferoWithoutContext wraps an underlying afero.Fs without context knowledge, +// in a AferoContext-compliant implementation. +func AferoWithoutContext(fs afero.Fs) AferoContext { + return &aferoWithoutCtx{fs} +} + +type aferoWithoutCtx struct { + fs afero.Fs +} + +func (a *aferoWithoutCtx) MkdirAll(_ context.Context, path string, perm os.FileMode) error { + return a.fs.MkdirAll(path, perm) +} + +func (a *aferoWithoutCtx) Remove(_ context.Context, name string) error { + return a.fs.Remove(name) +} + +func (a *aferoWithoutCtx) Stat(_ context.Context, name string) (os.FileInfo, error) { + return a.fs.Stat(name) +} + +func (a *aferoWithoutCtx) ReadDir(_ context.Context, dirname string) ([]os.FileInfo, error) { + return afero.ReadDir(a.fs, dirname) +} + +func (a *aferoWithoutCtx) Exists(_ context.Context, path string) (bool, error) { + return afero.Exists(a.fs, path) +} + +func (a *aferoWithoutCtx) ReadFile(_ context.Context, filename string) ([]byte, error) { + return afero.ReadFile(a.fs, filename) +} + +func (a *aferoWithoutCtx) WriteFile(_ context.Context, filename string, data []byte, perm os.FileMode) error { + return afero.WriteFile(a.fs, filename, data, perm) +} + +func (a *aferoWithoutCtx) Walk(_ context.Context, root string, walkFn filepath.WalkFunc) error { + return afero.Walk(a.fs, root, walkFn) +} From 51f6a884e3f372ff563de20fe48d364bc80b172a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 18 Jan 2021 23:36:51 +0200 Subject: [PATCH 027/149] format.go contains common 
interfaces and implementations of content-type and file-extension lookups --- pkg/storage/core/format.go | 79 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 pkg/storage/core/format.go diff --git a/pkg/storage/core/format.go b/pkg/storage/core/format.go new file mode 100644 index 00000000..12c27a45 --- /dev/null +++ b/pkg/storage/core/format.go @@ -0,0 +1,79 @@ +package core + +import ( + "context" + "errors" + "fmt" + "path/filepath" + + "github.com/weaveworks/libgitops/pkg/serializer" +) + +var ( + ErrCannotDetermineContentType = errors.New("cannot determine content type") + ErrUnrecognizedContentType = errors.New("unrecognized content type") +) + +// ContentTyper resolves the Content Type of a file given its path and the afero +// filesystem abstraction, so that it is possible to even examine the file if needed +// for making the judgement. See DefaultContentTyper for a sample implementation. +type ContentTyper interface { + // ContentTypeForPath should return the content type for the file that exists in + // the given AferoContext (path is relative). If the content type cannot be determined + // please return a wrapped ErrCannotDetermineContentType error. + ContentTypeForPath(ctx context.Context, fs AferoContext, path string) (serializer.ContentType, error) +} + +// DefaultContentTypes describes the default connection between +// file extensions and a content types. +var DefaultContentTyper ContentTyper = ContentTypeForExtension{ + ".json": serializer.ContentTypeJSON, + ".yaml": serializer.ContentTypeYAML, + ".yml": serializer.ContentTypeYAML, +} + +// ContentTypeForExtension implements the ContentTyper interface +// by looking up the extension of the given path in ContentTypeForPath +// matched against the key of the map. The extension in the map key +// must start with a dot, e.g. ".json". The value of the map contains +// the corresponding content type. 
There might be many extensions which
+// map to the same content type, e.g. both ".yaml" -> ContentTypeYAML
+// and ".yml" -> ContentTypeYAML.
+type ContentTypeForExtension map[string]serializer.ContentType
+
+func (m ContentTypeForExtension) ContentTypeForPath(ctx context.Context, _ AferoContext, path string) (serializer.ContentType, error) {
+	ct, ok := m[filepath.Ext(path)]
+	if !ok {
+		return serializer.ContentType(""), fmt.Errorf("%w for file %q", ErrCannotDetermineContentType, path)
+	}
+	return ct, nil
+}
+
+// FileExtensionResolver knows how to resolve what file extension to use for
+// a given ContentType.
+type FileExtensionResolver interface {
+	// ExtensionForContentType returns the file extension for the given ContentType.
+	// The returned string MUST start with a dot, e.g. ".json". If the given
+	// ContentType is not known, it is recommended to return a wrapped
+	// ErrUnrecognizedContentType.
+	ExtensionForContentType(ct serializer.ContentType) (string, error)
+}
+
+// DefaultFileExtensionResolver describes a default connection between
+// the file extensions and ContentTypes, namely JSON -> ".json" and
+// YAML -> ".yaml".
+var DefaultFileExtensionResolver FileExtensionResolver = ExtensionForContentType{
+	serializer.ContentTypeJSON: ".json",
+	serializer.ContentTypeYAML: ".yaml",
+}
+
+// ExtensionForContentType is a simple map implementation of FileExtensionResolver.
+type ExtensionForContentType map[serializer.ContentType]string + +func (m ExtensionForContentType) ExtensionForContentType(ct serializer.ContentType) (string, error) { + ext, ok := m[ct] + if !ok { + return "", fmt.Errorf("%q: %q", ErrUnrecognizedContentType, ct) + } + return ext, nil +} From 832424764f15198993c16575a98ec5fb18ec6011 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 18 Jan 2021 23:37:58 +0200 Subject: [PATCH 028/149] Implementations for (Unversioned)ObjectID --- pkg/storage/core/objectid.go | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 pkg/storage/core/objectid.go diff --git a/pkg/storage/core/objectid.go b/pkg/storage/core/objectid.go new file mode 100644 index 00000000..8dc747be --- /dev/null +++ b/pkg/storage/core/objectid.go @@ -0,0 +1,29 @@ +package core + +import "k8s.io/apimachinery/pkg/runtime/schema" + +// NewUnversionedObjectID creates a new UnversionedObjectID from the given GroupKind and ObjectKey. +func NewUnversionedObjectID(gk GroupKind, key ObjectKey) UnversionedObjectID { + return unversionedObjectID{gk, key} +} + +type unversionedObjectID struct { + gk GroupKind + key ObjectKey +} + +func (o unversionedObjectID) GroupKind() GroupKind { return o.gk } +func (o unversionedObjectID) ObjectKey() ObjectKey { return o.key } +func (o unversionedObjectID) WithVersion(version string) ObjectID { return objectID{o, version} } + +// NewObjectID creates a new ObjectID from the given GroupVersionKind and ObjectKey. 
+func NewObjectID(gvk GroupVersionKind, key ObjectKey) ObjectID { + return objectID{unversionedObjectID{gvk.GroupKind(), key}, gvk.Version} +} + +type objectID struct { + unversionedObjectID + version string +} + +func (o objectID) GroupVersionKind() schema.GroupVersionKind { return o.gk.WithVersion(o.version) } From a039e08c0b059200907bbecb31b12fd530bc162c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 18 Jan 2021 23:39:42 +0200 Subject: [PATCH 029/149] A set of common error creation and check functions --- pkg/storage/core/errors.go | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 pkg/storage/core/errors.go diff --git a/pkg/storage/core/errors.go b/pkg/storage/core/errors.go new file mode 100644 index 00000000..da0c97d7 --- /dev/null +++ b/pkg/storage/core/errors.go @@ -0,0 +1,38 @@ +package core + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +// StatusError is an error that supports also conversion +// to a metav1.Status struct for more detailed information. 
+type StatusError interface { + error + errors.APIStatus +} + +func NewErrNotFound(id UnversionedObjectID) StatusError { + return errors.NewNotFound(schema.GroupResource{ + Group: id.GroupKind().Group, + Resource: id.GroupKind().Kind, + }, id.ObjectKey().Name) +} + +func NewErrAlreadyExists(id UnversionedObjectID) StatusError { + return errors.NewAlreadyExists(schema.GroupResource{ + Group: id.GroupKind().Group, + Resource: id.GroupKind().Kind, + }, id.ObjectKey().Name) +} + +func NewErrInvalid(id UnversionedObjectID, errs field.ErrorList) StatusError { + return errors.NewInvalid(id.GroupKind(), id.ObjectKey().Name, errs) +} + +var ( + IsErrNotFound = errors.IsNotFound + IsErrAlreadyExists = errors.IsAlreadyExists + IsErrInvalid = errors.IsInvalid +) From eb6af4ca3799340ddb506708cac5e136a35efba1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 18 Jan 2021 23:44:31 +0200 Subject: [PATCH 030/149] Implement a default ObjectRecognizer --- pkg/storage/core/recognizer.go | 39 ++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 pkg/storage/core/recognizer.go diff --git a/pkg/storage/core/recognizer.go b/pkg/storage/core/recognizer.go new file mode 100644 index 00000000..b837e54d --- /dev/null +++ b/pkg/storage/core/recognizer.go @@ -0,0 +1,39 @@ +package core + +import ( + "context" + "errors" + + "github.com/weaveworks/libgitops/pkg/serializer" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SerializerObjectRecognizer implements ObjectRecognizer. +var _ ObjectRecognizer = &SerializerObjectRecognizer{} + +// SerializerObjectRecognizer is a simple implementation of ObjectRecognizer, that +// decodes the given byte content with the assumption that it is YAML (which covers +// both YAML and JSON formats) into a *metav1.PartialObjectMetadata, which allows +// extracting the ObjectID from any Kubernetes API Machinery-compatible Object. 
+// +// This operation works even though *metav1.PartialObjectMetadata is not registered +// with the underlying Scheme in any way. +type SerializerObjectRecognizer struct { + // Serializer is a required field in order for ResolveObjectID to function. + Serializer serializer.Serializer +} + +func (r *SerializerObjectRecognizer) ResolveObjectID(_ context.Context, _ string, content []byte) (ObjectID, error) { + if r.Serializer == nil { + return nil, errors.New("programmer error: SerializerObjectRecognizer.Serializer is nil") + } + metaObj := &metav1.PartialObjectMetadata{} + err := r.Serializer.Decoder().DecodeInto( + serializer.NewSingleFrameReader(content, serializer.ContentTypeYAML), + metaObj, + ) + if err != nil { + return nil, err + } + return NewObjectID(metaObj.GroupVersionKind(), ObjectKeyFromObject(metaObj)), nil +} From 6de1f2c6d9d54c15c01ddb88007d9bfda71c7395 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 18 Jan 2021 23:45:32 +0200 Subject: [PATCH 031/149] Implement a sample (and simple) Namespacer that statically determines if the object is namespaced or not. --- pkg/storage/core/namespaces.go | 37 ++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 pkg/storage/core/namespaces.go diff --git a/pkg/storage/core/namespaces.go b/pkg/storage/core/namespaces.go new file mode 100644 index 00000000..d0929f56 --- /dev/null +++ b/pkg/storage/core/namespaces.go @@ -0,0 +1,37 @@ +package core + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// StaticNamespacer implements Namespacer +var _ Namespacer = StaticNamespacer{} + +// StaticNamespacer has a default policy, which is that objects are in general namespaced +// (NamespacedIsDefaultPolicy == true), or that they are in general root-scoped +// (NamespacedIsDefaultPolicy == false). +// +// To the default policy, Exceptions can be added, so that for that GroupKind, the default +// policy is reversed. 
+type StaticNamespacer struct { + NamespacedIsDefaultPolicy bool + Exceptions []schema.GroupKind +} + +func (n StaticNamespacer) IsNamespaced(gk schema.GroupKind) (bool, error) { + if n.NamespacedIsDefaultPolicy { + // namespace by default, the gks list is a list of root-scoped entities + return !n.gkIsException(gk), nil + } + // root by default, the gks in the list are namespaced + return n.gkIsException(gk), nil +} + +func (n StaticNamespacer) gkIsException(target schema.GroupKind) bool { + for _, gk := range n.Exceptions { + if gk == target { + return true + } + } + return false +} From 53be24cd9c034aa442e2ff403e7f7b23f1a5a08a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 18 Jan 2021 23:51:05 +0200 Subject: [PATCH 032/149] Add a definition and sample implementation of PathExcluder. --- pkg/storage/core/path_excluder.go | 43 ++++++++++++++++ pkg/storage/core/path_excluder_test.go | 69 ++++++++++++++++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 pkg/storage/core/path_excluder.go create mode 100644 pkg/storage/core/path_excluder_test.go diff --git a/pkg/storage/core/path_excluder.go b/pkg/storage/core/path_excluder.go new file mode 100644 index 00000000..3b51e52a --- /dev/null +++ b/pkg/storage/core/path_excluder.go @@ -0,0 +1,43 @@ +package core + +import ( + "context" + "path/filepath" +) + +// PathExcluder is an interface that lets the user implement custom policies +// for whether a given relative path to a given directory (fs is scoped at +// that directory) should be considered for an operation (e.g. inotify watch +// or file search). +type PathExcluder interface { + // ShouldExcludePath takes in a context, the fs filesystem abstraction, + // and a relative path to the file which should be determined if it should + // be excluded or not. + ShouldExcludePath(ctx context.Context, fs AferoContext, path string) bool +} + +// ExcludeGitDirectory implements PathExcluder. 
+var _ PathExcluder = ExcludeGitDirectory{} + +// ExcludeGitDirectory is a sample implementation of PathExcluder, that excludes +// all files under a ".git" directory, anywhere in the tree under the root directory. +type ExcludeGitDirectory struct{} + +func (ExcludeGitDirectory) ShouldExcludePath(_ context.Context, _ AferoContext, path string) bool { + // Always start from a clean path + path = filepath.Clean(path) + for { + // get the current base entry name + baseName := filepath.Base(path) + // This means path is now an empty string; we did not find a .git directory anywhere + if baseName == "." { + return false + } + // We possibly found a directory named git; this path should be excluded + if baseName == ".git" { + return true + } + // "go up" one directory for the next iteration + path = filepath.Dir(path) + } +} diff --git a/pkg/storage/core/path_excluder_test.go b/pkg/storage/core/path_excluder_test.go new file mode 100644 index 00000000..8121ac89 --- /dev/null +++ b/pkg/storage/core/path_excluder_test.go @@ -0,0 +1,69 @@ +package core + +import ( + "context" + "testing" +) + +func TestExcludeGitDirectory_ShouldExcludePath(t *testing.T) { + tests := []struct { + name string + path string + want bool + }{ + { + name: "normal", + path: ".git", + want: true, + }, + { + name: "with relative path", + path: "./.git", + want: true, + }, + { + name: "with many parents", + path: "/foo/bar/.git", + want: true, + }, + { + name: "with many children", + path: ".git/foo/bar/baz", + want: true, + }, + { + name: "with parents and children", + path: "./foo/bar/.git/baz/bar", + want: true, + }, + { + name: "empty", + path: "", + want: false, + }, + { + name: "local dir", + path: ".", + want: false, + }, + { + name: "other prefix", + path: "foo.git", + want: false, + }, + { + name: "other suffix", + path: ".gitea", + want: false, + }, + } + e := ExcludeGitDirectory{} + ctx := context.Background() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := 
e.ShouldExcludePath(ctx, nil, tt.path); got != tt.want { + t.Errorf("ExcludeGitDirectory.ShouldExcludePath() = %v, want %v", got, tt.want) + } + }) + } +} From 15fadfc005a4a9ad8c94df99c6333e2476087e3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 18 Jan 2021 23:52:30 +0200 Subject: [PATCH 033/149] Add sample Namespacer and NamespaceEnforcer implementations for Kubernetes-style operations. --- pkg/storage/kube/namespaces.go | 115 +++++++++++++++++++++++++++++++++ 1 file changed, 115 insertions(+) create mode 100644 pkg/storage/kube/namespaces.go diff --git a/pkg/storage/kube/namespaces.go b/pkg/storage/kube/namespaces.go new file mode 100644 index 00000000..8a544571 --- /dev/null +++ b/pkg/storage/kube/namespaces.go @@ -0,0 +1,115 @@ +package kube + +import ( + "errors" + "fmt" + "sync" + + "github.com/weaveworks/libgitops/pkg/storage/core" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// TODO: Make an example component that iterates through all of a raw.Storage's +// or FileFinder's objects, and just reads them, converts them into the current +// hub version. + +var ( + // ErrNoSuchNamespace means that the set of namespaces was searched in the + // system, but the requested namespace wasn't in that list. + ErrNoSuchNamespace = errors.New("no such namespace in the system") +) + +// NamespaceEnforcer implements core.NamespaceEnforcer similarly to how the +// Kubernetes API server behaves. +type NamespaceEnforcer struct{} + +var _ core.NamespaceEnforcer = NamespaceEnforcer{} + +func (NamespaceEnforcer) RequireSetNamespaceExists() bool { return true } + +func (NamespaceEnforcer) EnforceNamespace(obj core.Object, namespaced bool, namespaces sets.String) error { + ns := obj.GetNamespace() + if !namespaced { + // If a namespace was set, it should be sanitized. 
+ if len(ns) != 0 { + obj.SetNamespace("") + } + return nil + } + // The resource is namespaced. + // If it is empty, set it to the default namespace. + if len(ns) == 0 { + obj.SetNamespace(metav1.NamespaceDefault) + return nil + } + // If the namespace field is set, but it doesn't exist in the set, error + if !namespaces.Has(ns) { + return fmt.Errorf("%w: %q", ErrNoSuchNamespace, ns) + } + return nil +} + +// SimpleRESTMapper is a subset of the meta.RESTMapper interface +type SimpleRESTMapper interface { + // RESTMapping identifies a preferred resource mapping for the provided group kind. + RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) +} + +// RESTMapperToNamespacer implements the Namespacer interface by fetching (and caching) data +// from the given RESTMapper interface, that is compatible with any meta.RESTMapper implementation. +// This allows you to e.g. pass in a meta.RESTMapper yielded from +// sigs.k8s.io/controller-runtime/pkg/client/apiutil.NewDiscoveryRESTMapper(c *rest.Config), or +// k8s.io/client-go/restmapper.NewDiscoveryRESTMapper(groups []*restmapper.APIGroupResources) +// in order to look up namespacing information from either a running API server, or statically, from +// the list of restmapper.APIGroupResources. 
+func RESTMapperToNamespacer(mapper SimpleRESTMapper) core.Namespacer { + return &restNamespacer{ + mapper: mapper, + mappingByType: make(map[schema.GroupKind]*meta.RESTMapping), + mu: &sync.RWMutex{}, + } +} + +var _ core.Namespacer = &restNamespacer{} + +type restNamespacer struct { + mapper SimpleRESTMapper + + mappingByType map[schema.GroupKind]*meta.RESTMapping + mu *sync.RWMutex +} + +func (n *restNamespacer) IsNamespaced(gk schema.GroupKind) (bool, error) { + m, err := n.getMapping(gk) + if err != nil { + return false, err + } + return mappingNamespaced(m), nil +} + +func (n *restNamespacer) getMapping(gk schema.GroupKind) (*meta.RESTMapping, error) { + n.mu.RLock() + mapping, ok := n.mappingByType[gk] + n.mu.RUnlock() + // If already cached, we're ok + if ok { + return mapping, nil + } + + // Write the mapping info to our cache + n.mu.Lock() + defer n.mu.Unlock() + m, err := n.mapper.RESTMapping(gk) + if err != nil { + return nil, err + } + n.mappingByType[gk] = m + return m, nil +} + +func mappingNamespaced(mapping *meta.RESTMapping) bool { + return mapping.Scope.Name() == meta.RESTScopeNameNamespace +} From fb40319d8d72f18b6faeb6b05c89e0767974701a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 18 Jan 2021 23:53:37 +0200 Subject: [PATCH 034/149] Register removed files that were rewritten and moved to pkg/storage/core --- pkg/storage/format.go | 20 ----- pkg/storage/key.go | 52 ------------ pkg/storage/namespaces.go | 168 -------------------------------------- 3 files changed, 240 deletions(-) delete mode 100644 pkg/storage/format.go delete mode 100644 pkg/storage/key.go delete mode 100644 pkg/storage/namespaces.go diff --git a/pkg/storage/format.go b/pkg/storage/format.go deleted file mode 100644 index f5756592..00000000 --- a/pkg/storage/format.go +++ /dev/null @@ -1,20 +0,0 @@ -package storage - -import "github.com/weaveworks/libgitops/pkg/serializer" - -// ContentTypes describes the connection between -// file 
extensions and a content types. -var ContentTypes = map[string]serializer.ContentType{ - ".json": serializer.ContentTypeJSON, - ".yaml": serializer.ContentTypeYAML, - ".yml": serializer.ContentTypeYAML, -} - -var extToContentType = map[serializer.ContentType]string{ - serializer.ContentTypeJSON: ".json", - serializer.ContentTypeYAML: ".yaml", -} - -func extForContentType(wanted serializer.ContentType) string { - return extToContentType[wanted] -} diff --git a/pkg/storage/key.go b/pkg/storage/key.go deleted file mode 100644 index fb4c0ab0..00000000 --- a/pkg/storage/key.go +++ /dev/null @@ -1,52 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// Aliases -type Object = client.Object -type ObjectList = client.ObjectList -type KindKey = schema.GroupVersionKind -type NamespacedName = types.NamespacedName -type Patch = client.Patch - -var ErrNoMetadata = errors.New("it is required to embed ObjectMeta into the serialized API type") - -type ObjectKey interface { - Kind() KindKey - NamespacedName() NamespacedName -} - -// objectKey implements ObjectKey. 
-var _ ObjectKey = &objectKey{} - -type objectKey struct { - kind KindKey - name NamespacedName -} - -func (key objectKey) Kind() KindKey { return key.kind } -func (key objectKey) NamespacedName() NamespacedName { return key.name } - -func NewObjectKey(kind KindKey, name NamespacedName) ObjectKey { - return objectKey{kind, name} -} - -func NewObjectForGVK(kind KindKey, scheme *runtime.Scheme) (Object, error) { - kobj, err := scheme.New(kind) - if err != nil { - return nil, err - } - obj, ok := kobj.(Object) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrNoMetadata, kind) - } - return obj, nil -} diff --git a/pkg/storage/namespaces.go b/pkg/storage/namespaces.go deleted file mode 100644 index 75b9a1b4..00000000 --- a/pkg/storage/namespaces.go +++ /dev/null @@ -1,168 +0,0 @@ -package storage - -import ( - "errors" - "fmt" - "sync" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" -) - -var ( - // ErrNoSuchNamespace means that the set of namespaces was searched in the - // system, but the requested namespace wasn't in that list. - ErrNoSuchNamespace = errors.New("no such namespace in the system") -) - -// NamespaceEnforcer enforces a namespace policy for the Storage. -type NamespaceEnforcer interface { - // RequireNamespaceExists specifies whether the namespace must exist in the system. - // For example, Kubernetes requires this by default. - RequireNamespaceExists() bool - // EnforceNamespace operates on the object to make it conform with a given set of rules. - // If RequireNamespaceExists() is true, all the namespaces available in the system must - // be passed to namespaces. 
- // For example, Kubernetes enforces the following rules: - // Namespaced resources: - // If .metadata.namespace == "": .metadata.namespace = "default" - // If .metadata.namespace != "": Make sure there is such a namespace, and use it in that case - // Non-namespaced resources: - // If .metadata.namespace != "": .metadata.namespace = "" - EnforceNamespace(obj Object, namespaced bool, namespaces sets.String) error -} - -// K8sNamespaceEnforcer implements NamespaceEnforcer similarly to how the API server behaves. -type K8sNamespaceEnforcer struct{} - -var _ NamespaceEnforcer = K8sNamespaceEnforcer{} - -func (K8sNamespaceEnforcer) RequireNamespaceExists() bool { return true } - -func (K8sNamespaceEnforcer) EnforceNamespace(obj Object, namespaced bool, namespaces sets.String) error { - ns := obj.GetNamespace() - if !namespaced { - // If a namespace was set, it should be sanitized. - if len(ns) != 0 { - obj.SetNamespace("") - } - return nil - } - // The resource is namespaced. - // If it is empty, set it to the default namespace. - if len(ns) == 0 { - obj.SetNamespace(metav1.NamespaceDefault) - return nil - } - // If the namespace field is set, but it doesn't exist in the set, error - if !namespaces.Has(ns) { - return fmt.Errorf("%w: %q", ErrNoSuchNamespace, ns) - } - return nil -} - -// Namespacer is an interface that lets the caller know if a GroupKind is namespaced -// or not. There are two ready-made implementations: -// 1. RESTMapperToNamespacer -// 2. NewStaticNamespacer -type Namespacer interface { - // IsNamespaced returns true if the GroupKind is a namespaced type - IsNamespaced(gk schema.GroupKind) (bool, error) -} - -// RESTMapper is a subset of the meta.RESTMapper interface -type RESTMapper interface { - // RESTMapping identifies a preferred resource mapping for the provided group kind. 
- RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) -} - -// RESTMapperToNamespacer implements the Namespacer interface by fetching (and caching) data -// from the given RESTMapper interface, that is compatible with any meta.RESTMapper implementation. -// This allows you to e.g. pass in a meta.RESTMapper yielded from -// sigs.k8s.io/controller-runtime/pkg/client/apiutil.NewDiscoveryRESTMapper(c *rest.Config), or -// k8s.io/client-go/restmapper.NewDiscoveryRESTMapper(groups []*restmapper.APIGroupResources) -// in order to look up namespacing information from either a running API server, or statically, from -// the list of restmapper.APIGroupResources. -func RESTMapperToNamespacer(mapper RESTMapper) Namespacer { - return &restNamespacer{ - mapper: mapper, - mappingByType: make(map[schema.GroupKind]*meta.RESTMapping), - mu: &sync.RWMutex{}, - } -} - -var _ Namespacer = &restNamespacer{} - -type restNamespacer struct { - mapper RESTMapper - - mappingByType map[schema.GroupKind]*meta.RESTMapping - mu *sync.RWMutex -} - -func (n *restNamespacer) IsNamespaced(gk schema.GroupKind) (bool, error) { - m, err := n.getMapping(gk) - if err != nil { - return false, err - } - return mappingNamespaced(m), nil -} - -func (n *restNamespacer) getMapping(gk schema.GroupKind) (*meta.RESTMapping, error) { - n.mu.RLock() - mapping, ok := n.mappingByType[gk] - n.mu.RUnlock() - // If already cached, we're ok - if ok { - return mapping, nil - } - - // Write the mapping info to our cache - n.mu.Lock() - defer n.mu.Unlock() - m, err := n.mapper.RESTMapping(gk) - if err != nil { - return nil, err - } - n.mappingByType[gk] = m - return m, nil -} - -func mappingNamespaced(mapping *meta.RESTMapping) bool { - return mapping.Scope.Name() == meta.RESTScopeNameNamespace -} - -// NewStaticNamespacer has a default policy, which is that objects are in general namespaced -// (defaultToNamespaced == true), or that they are in general root-scoped (defaultToNamespaced == false). 
-// To the default policy, exceptions can be added, so that for that GroupKind, the default -// policy is reversed. -func NewStaticNamespacer(defaultToNamespaced bool, exceptions ...schema.GroupKind) Namespacer { - return &staticNamespacedInfo{defaultToNamespaced, exceptions} -} - -var _ Namespacer = &staticNamespacedInfo{} - -type staticNamespacedInfo struct { - defaultToNamespaced bool - exceptions []schema.GroupKind -} - -func (n *staticNamespacedInfo) IsNamespaced(gk schema.GroupKind) (bool, error) { - if n.defaultToNamespaced { - // namespace by default, the gks list is a list of root-scoped entities - return !n.gkIsException(gk), nil - } - // root by default, the gks in the list are namespaced - return n.gkIsException(gk), nil -} - -func (n *staticNamespacedInfo) gkIsException(target schema.GroupKind) bool { - for _, gk := range n.exceptions { - if gk == target { - return true - } - } - return false -} From f161c9668e66b5f54f0aaaeb1227969459b47c04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 19 Jan 2021 01:00:26 +0200 Subject: [PATCH 035/149] Check in the main raw.Storage-related interfaces. --- pkg/storage/raw/interfaces.go | 180 ++++++++++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) create mode 100644 pkg/storage/raw/interfaces.go diff --git a/pkg/storage/raw/interfaces.go b/pkg/storage/raw/interfaces.go new file mode 100644 index 00000000..cea714ee --- /dev/null +++ b/pkg/storage/raw/interfaces.go @@ -0,0 +1,180 @@ +package raw + +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +// Storage is a Key-indexed low-level interface to +// store byte-encoded Objects (resources) in non-volatile +// memory. +// +// This Storage operates entirely on GroupKinds; without enforcing +// a specific version of the encoded data format. 
This is possible +// with the assumption that any older format stored at disk can be +// read successfully and converted into a more recent version. +// +// TODO: Add thread-safety so it is not possible to issue a Write() or Delete() +// at the same time as any other read operation. +type Storage interface { + Reader + Writer +} + +// Accessors allows access to lower-level interfaces needed by Storage. +type Accessors interface { + // Namespacer gives access to the namespacer that is used + Namespacer() core.Namespacer + // Filesystem gets the underlying filesystem abstraction, if + // applicable. + Filesystem() core.AferoContext +} + +// Reader provides the read operations for the Storage. +type Reader interface { + Accessors + + // Read operations + + // Read returns a resource's content based on the ID. + // If the resource does not exist, it returns core.NewErrNotFound. + Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) + // Stat returns information about the object, e.g. checksum, + // content type, and possibly, path on disk (in the case of + // FilesystemStorage), or core.NewErrNotFound if not found + Stat(ctx context.Context, id core.UnversionedObjectID) (ObjectInfo, error) + // Exists checks if the resource indicated by the ID exists. It is + // a shorthand for running Stat() and checking that error was nil. + Exists(ctx context.Context, id core.UnversionedObjectID) bool + // Checksum returns the ContentType. This operation must function + // also before the Object with the given id exists in the system, + // in order to support creating new Objects. + ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) + + // List operations + + // List returns all matching object keys based on the given KindKey. + // If the GroupKind is namespaced (according to the Namespacer), and + // namespace is empty: all namespaces are searched. If namespace in + // that case is set; only that namespace is searched. 
If the GroupKind + // is non-namespaced, and namespace is non-empty, an error is returned. + // TODO: Make this return []core.UnversionedObjectID instead? + List(ctx context.Context, gk core.GroupKind, namespace string) ([]core.ObjectKey, error) +} + +// ObjectInfo is the return value from Storage.Stat(). It provides the +// user with information about the given Object, e.g. its ContentType, +// a checksum, and its relative path on disk, if the Storage is a +// FilesystemStorage. +type ObjectInfo interface { + // ContentTyped returns the ContentType of the Object when stored. + serializer.ContentTyped + // ChecksumContainer knows how to retrieve the checksum of the file. + ChecksumContainer + // Path is the relative path between the AferoContext root dir and + // the Stat'd file. + Path() string + // ID returns the ID for the given Object. + ID() core.UnversionedObjectID +} + +// ChecksumContainer is an interface for exposing a checksum. +// +// What the checksum is is application-dependent, however, it +// should be the same for two invocations, as long as the stored +// data is the same. It might change over time although the +// underlying data did not. Examples of checksums that can be +// used is: the file modification timestamp, a sha256sum of the +// file content, or the latest Git commit when the file was +// changed. +// +// Look for documentation on the Storage you are using for more +// details on what checksum algorithm is used. +type ChecksumContainer interface { + // Checksum returns the checksum of the file. + Checksum() string +} + +// Reader provides the write operations for the Storage. +type Writer interface { + Accessors + + // Write operations + + // Write writes the given content to the resource indicated by the ID. + // Error returns are implementation-specific. + Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error + // Delete deletes the resource indicated by the ID. 
+ // If the resource does not exist, it returns ErrNotFound. + Delete(ctx context.Context, id core.UnversionedObjectID) error +} + +// FilesystemStorage extends Storage by specializing it to operate in a +// filesystem context, and in other words use a FileFinder to locate the +// files to operate on. +type FilesystemStorage interface { + Storage + + // RootDirectory returns the root directory of this FilesystemStorage. + RootDirectory() string + // FileFinder returns the underlying FileFinder used. + FileFinder() FileFinder +} + +// FileFinder is a generic implementation for locating files on disk, to be +// used by a FilesystemStorage. +type FileFinder interface { + // FileFinder must be able to provide a ContentType for a path, although + // that path might not exist (i.e. in a create operation). + core.ContentTyper + + // ObjectPath gets the file path relative to the root directory. + // In order to support a create operation, this function must also return a valid path for + // files that do not yet exist on disk. + ObjectPath(ctx context.Context, fs core.AferoContext, id core.UnversionedObjectID, namespaced bool) (string, error) + // ObjectAt retrieves the ID based on the given relative file path to fs. + ObjectAt(ctx context.Context, fs core.AferoContext, path string) (core.UnversionedObjectID, error) + + // ListNamespaces lists the available namespaces for the given GroupKind + // This function shall only be called for namespaced objects, it is up to + // the caller to make sure they do not call this method for root-spaced + // objects; for that the behavior is undefined (but returning an error + // is recommended). + ListNamespaces(ctx context.Context, fs core.AferoContext, gk core.GroupKind) ([]string, error) + // ListObjectKeys returns a list of names (with optionally, the namespace). + // For namespaced GroupKinds, the caller must provide a namespace, and for + // root-spaced GroupKinds, the caller must not. 
When namespaced, this function + // must only return object keys for that given namespace. + // TODO: Make this return []core.UnversionedObjectID instead? + ListObjectKeys(ctx context.Context, fs core.AferoContext, gk core.GroupKind, namespace string) ([]core.ObjectKey, error) +} + +// MappedFileFinder is an extension to FileFinder that allows it to have an internal +// cache with mappings between UnversionedObjectID and a ChecksumPath. This allows +// higher-order interfaces to manage Objects in files in an unorganized directory +// (e.g. a Git repo). +type MappedFileFinder interface { + FileFinder + + // GetMapping retrieves a mapping in the system. + GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) + // SetMapping binds an ID to a physical file path. This operation overwrites + // any previous mapping for id. + SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) + // SetMappings replaces all mappings at once to the ones in m. + SetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) + // DeleteMapping removes the mapping for the given id. + DeleteMapping(ctx context.Context, id core.UnversionedObjectID) +} + +// ChecksumPath is a tuple of a given Checksum and relative file Path, +// for use in MappedFileFinder. +type ChecksumPath struct { + // TODO: Implement ChecksumContainer, or make ChecksumPath a + // sub-interface of ObjectID? + Checksum string + // Note: path is relative to the AferoContext. + Path string +} From 29878419f271d067b20aea701d23c2f809a3d4ca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 19 Jan 2021 01:02:55 +0200 Subject: [PATCH 036/149] Move and rewrite the GenericRawStorage implementation completely, now using the more detailed interfaces. There is now one generic raw.Storage part, and one specialized, but simple, FileFinder. 
--- pkg/storage/raw/filefinder_simple.go | 200 +++++++++++++++++ pkg/storage/raw/rawstorage.go | 239 ++++++++++++++++++++ pkg/storage/raw/rawstorage_options.go | 33 +++ pkg/storage/rawstorage.go | 303 -------------------------- pkg/storage/rawstorage_options.go | 42 ---- 5 files changed, 472 insertions(+), 345 deletions(-) create mode 100644 pkg/storage/raw/filefinder_simple.go create mode 100644 pkg/storage/raw/rawstorage.go create mode 100644 pkg/storage/raw/rawstorage_options.go delete mode 100644 pkg/storage/rawstorage.go delete mode 100644 pkg/storage/rawstorage_options.go diff --git a/pkg/storage/raw/filefinder_simple.go b/pkg/storage/raw/filefinder_simple.go new file mode 100644 index 00000000..0a053772 --- /dev/null +++ b/pkg/storage/raw/filefinder_simple.go @@ -0,0 +1,200 @@ +package raw + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +// NewSimpleStorage is a default opinionated constructor for a FilesystemStorage +// using SimpleFileFinder as the FileFinder, and the local disk as target. +// If you need more advanced customizablility than provided here, you can compose +// the call to NewGenericFilesystemStorage yourself. +func NewSimpleStorage(dir string, ct serializer.ContentType, namespacer core.Namespacer) (FilesystemStorage, error) { + fileFinder := &SimpleFileFinder{ + // ContentType is optional; JSON is used by default + ContentType: ct, + } + // dir and namespacer are validated by NewGenericFilesystemStorage. + return NewGenericFilesystemStorage(dir, fileFinder, namespacer) +} + +var _ FileFinder = &SimpleFileFinder{} +var _ core.ContentTyper = &SimpleFileFinder{} + +// SimpleFileFinder is a FileFinder-compliant implementation that +// stores Objects on disk using a straightforward directory layout. 
+// +// The following directory layout is used: +// if DisableGroupDirectory == false && SubDirectoryFileName == "" { +// ////. if namespaced or +// ///. if non-namespaced +// } +// else if DisableGroupDirectory == false && SubDirectoryFileName == "foo" { +// /////foo. if namespaced or +// ////foo. if non-namespaced +// } +// else if DisableGroupDirectory == true && SubDirectoryFileName == "" { +// ///. if namespaced or +// //. if non-namespaced +// } +// else if DisableGroupDirectory == true && SubDirectoryFileName == "foo" { +// ////foo. if namespaced or +// ///foo. if non-namespaced +// } +// +// is resolved by the FileExtensionResolver, for the given ContentType. +// +// This FileFinder does not support the ObjectAt method. +type SimpleFileFinder struct { + // Default: false; means enable group directory + DisableGroupDirectory bool + // Default: ""; means use file names as the means of storage + SubDirectoryFileName string + // Default: serializer.ContentTypeJSON + ContentType serializer.ContentType + // Default: DefaultFileExtensionResolver + FileExtensionResolver core.FileExtensionResolver +} + +// ObjectPath gets the file path relative to the root directory +func (f *SimpleFileFinder) ObjectPath(ctx context.Context, fs core.AferoContext, id core.UnversionedObjectID, namespaced bool) (string, error) { + // // + paths := []string{f.kindKeyPath(id.GroupKind())} + if namespaced { + // .// + paths = append(paths, id.ObjectKey().Namespace) + } + // Get the file extension + ext, err := f.ext() + if err != nil { + return "", err + } + if f.SubDirectoryFileName == "" { + // ./. + paths = append(paths, id.ObjectKey().Name+ext) + } else { + // .//. 
+ paths = append(paths, id.ObjectKey().Name, f.SubDirectoryFileName+ext) + } + return filepath.Join(paths...), nil +} + +func (f *SimpleFileFinder) kindKeyPath(gk core.GroupKind) string { + if f.DisableGroupDirectory { + // .// + return filepath.Join(gk.Kind) + } + // ./// + return filepath.Join(gk.Group, gk.Kind) +} + +// ObjectAt retrieves the ID containing the virtual path based +// on the given physical file path. +func (f *SimpleFileFinder) ObjectAt(ctx context.Context, fs core.AferoContext, path string) (core.UnversionedObjectID, error) { + return nil, errors.New("not implemented") +} + +// ListNamespaces lists the available namespaces for the given GroupKind +// This function shall only be called for namespaced objects, it is up to +// the caller to make sure they do not call this method for root-spaced +// objects; for that the behavior is undefined (but returning an error +// is recommended). +func (f *SimpleFileFinder) ListNamespaces(ctx context.Context, fs core.AferoContext, gk core.GroupKind) ([]string, error) { + return readDir(ctx, fs, f.kindKeyPath(gk)) +} + +// ListObjectKeys returns a list of names (with optionally, the namespace). +// For namespaced GroupKinds, the caller must provide a namespace, and for +// root-spaced GroupKinds, the caller must not. When namespaced, this function +// must only return object keys for that given namespace. 
+func (f *SimpleFileFinder) ListObjectKeys(ctx context.Context, fs core.AferoContext, gk core.GroupKind, namespace string) ([]core.ObjectKey, error) { + // If namespace is empty, the names will be in ./, otherwise .// + namesDir := filepath.Join(f.kindKeyPath(gk), namespace) + entries, err := readDir(ctx, fs, namesDir) + if err != nil { + return nil, err + } + // Get the file extension + ext, err := f.ext() + if err != nil { + return nil, err + } + // Map the names to ObjectKeys + keys := make([]core.ObjectKey, 0, len(entries)) + for _, entry := range entries { + // Loop through all entries, and make sure they are sanitized .metadata.name's + if f.SubDirectoryFileName != "" { + // If f.SubDirectoryFileName != "", the file names already match .metadata.name + // Make sure the metadata file ./<.metadata.name>/. actually exists + expectedPath := filepath.Join(namesDir, entry, f.SubDirectoryFileName+ext) + if exists, _ := fs.Exists(ctx, expectedPath); !exists { + continue + } + } else { + // Storage path is ./.. entry is "." + // Verify the extension is there and strip it from name. If ext isn't there, just continue + if !strings.HasSuffix(entry, ext) { + continue + } + // Remove the extension from the name + entry = strings.TrimSuffix(entry, ext) + } + // If we got this far, add the key to the list + keys = append(keys, core.ObjectKey{Name: entry, Namespace: namespace}) + } + return keys, nil +} + +// ContentTypeForPath always returns f.ContentType, or ContentTypeJSON as a fallback if +// f.ContentType was not set. 
+func (f *SimpleFileFinder) ContentTypeForPath(ctx context.Context, _ core.AferoContext, path string) (serializer.ContentType, error) { + return f.contentType(), nil +} + +func (f *SimpleFileFinder) ext() (string, error) { + resolver := f.FileExtensionResolver + if resolver == nil { + resolver = core.DefaultFileExtensionResolver + } + ext, err := resolver.ExtensionForContentType(f.contentType()) + if err != nil { + return "", err + } + return ext, nil +} + +func (f *SimpleFileFinder) contentType() serializer.ContentType { + if len(f.ContentType) != 0 { + return f.ContentType + } + return serializer.ContentTypeJSON +} + +func readDir(ctx context.Context, fs core.AferoContext, dir string) ([]string, error) { + fi, err := fs.Stat(ctx, dir) + if os.IsNotExist(err) { + // It's ok if the directory doesn't exist (yet), we just don't have any items then :) + return nil, nil + } else if !fi.IsDir() { + // Unexpected, if the directory actually would be a file + return nil, fmt.Errorf("expected that %s is a directory", dir) + } + + // When we know that path is a directory, go ahead and read it + entries, err := fs.ReadDir(ctx, dir) + if err != nil { + return nil, err + } + fileNames := make([]string, 0, len(entries)) + for _, entry := range entries { + fileNames = append(fileNames, entry.Name()) + } + return fileNames, nil +} diff --git a/pkg/storage/raw/rawstorage.go b/pkg/storage/raw/rawstorage.go new file mode 100644 index 00000000..70a0c192 --- /dev/null +++ b/pkg/storage/raw/rawstorage.go @@ -0,0 +1,239 @@ +package raw + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/spf13/afero" + "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +// NewGenericFilesystemStorage creates a new GenericFilesystemStorage using the given lower-level +// interface implementations. dir, fileFinder and namespacer are required and must hence be non-nil. 
+// If AferoContext in the options is set, it must have its root directory set (using NewBasePathFs) +// exactly to dir. +func NewGenericFilesystemStorage(dir string, fileFinder FileFinder, namespacer core.Namespacer, opts ...GenericFilesystemStorageOption) (FilesystemStorage, error) { + if len(dir) == 0 { + return nil, fmt.Errorf("NewGenericFilesystemStorage: dir is mandatory") + } + if fileFinder == nil { + return nil, fmt.Errorf("NewGenericFilesystemStorage: fileFinder is mandatory") + } + if namespacer == nil { + return nil, fmt.Errorf("NewGenericFilesystemStorage: namespacer is mandatory") + } + // Parse the options + o := (&GenericFilesystemStorageOptions{}).ApplyOptions(opts) + if o.AferoContext == nil { + // Default to ignoring the context parameter, only seeing things relative + // to dir, and operating on the local disk. + + // TODO: Make a helper for this, and possibly also a RootDirectory() string + // method to AferoContext, to make it easier to detect if that exists. + o.AferoContext = core.AferoWithoutContext(afero.NewBasePathFs(afero.NewOsFs(), dir)) + } // else validate that the given AferoContext has root dir set to dir + + return &GenericFilesystemStorage{ + dir: dir, + fileFinder: fileFinder, + namespacer: namespacer, + fs: o.AferoContext, + }, nil +} + +// GenericFilesystemStorage is a FilesystemStorage-compliant implementation, that +// combines the given lower-level FileFinder, Namespacer and AferoContext interfaces +// in a generic manner. +// +// Checksum is calculated based on the modification timestamp of the file, or +// alternatively, from info.Sys() returned from AferoContext.Stat(), if it can +// be cast to a ChecksumContainer. 
+type GenericFilesystemStorage struct { + dir string + fileFinder FileFinder + namespacer core.Namespacer + fs core.AferoContext +} + +func (r *GenericFilesystemStorage) Namespacer() core.Namespacer { + return r.namespacer +} + +func (r *GenericFilesystemStorage) Filesystem() core.AferoContext { + return r.fs +} + +func (r *GenericFilesystemStorage) FileFinder() FileFinder { + return r.fileFinder +} + +func (r *GenericFilesystemStorage) RootDirectory() string { + return r.dir +} + +func (r *GenericFilesystemStorage) Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) { + // Check if the resource indicated by key exists + if !r.Exists(ctx, id) { + return nil, core.NewErrNotFound(id) + } + // Get the path + p, err := r.getPath(ctx, id) + if err != nil { + return nil, err + } + // Read the file + return r.fs.ReadFile(ctx, p) +} + +func (r *GenericFilesystemStorage) Exists(ctx context.Context, id core.UnversionedObjectID) bool { + // Get the path + p, err := r.getPath(ctx, id) + if err != nil { + return false + } + exists, _ := r.fs.Exists(ctx, p) + return exists +} + +func (r *GenericFilesystemStorage) Stat(ctx context.Context, id core.UnversionedObjectID) (ObjectInfo, error) { + // Get the path + p, err := r.getPath(ctx, id) + if err != nil { + return nil, err + } + + // Stat the file + info, err := r.fs.Stat(ctx, p) + if os.IsNotExist(err) { + return nil, core.NewErrNotFound(id) + } else if err != nil { + return nil, err + } + + // Get checksum + checksum := checksumFromFileInfo(info) + // Allow a custom implementation of afero return ObjectInfo directly + if chk, ok := info.Sys().(ChecksumContainer); ok { + checksum = chk.Checksum() + } + + // Get content type + contentType, err := r.contentType(ctx, p) + if err != nil { + return nil, err + } + + return &objectInfo{ + ct: contentType, + checksum: checksum, + filepath: p, + id: id, + }, nil +} + +func (r *GenericFilesystemStorage) ContentType(ctx context.Context, id core.UnversionedObjectID) 
(serializer.ContentType, error) { + // Get the path + p, err := r.getPath(ctx, id) + if err != nil { + return serializer.ContentType(""), err + } + // Resolve the content type for the path + return r.contentType(ctx, p) +} + +func (r *GenericFilesystemStorage) contentType(ctx context.Context, p string) (serializer.ContentType, error) { + return r.fileFinder.ContentTypeForPath(ctx, r.fs, p) +} + +func (r *GenericFilesystemStorage) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error { + // Get the path + p, err := r.getPath(ctx, id) + if err != nil { + return err + } + // Create the underlying directories if they do not exist already + if !r.Exists(ctx, id) { + if err := r.fs.MkdirAll(ctx, filepath.Dir(p), 0755); err != nil { + return err + } + } + // Write the file content + return r.fs.WriteFile(ctx, p, content, 0664) +} + +func (r *GenericFilesystemStorage) Delete(ctx context.Context, id core.UnversionedObjectID) error { + // Check if the resource indicated by key exists + if !r.Exists(ctx, id) { + return core.NewErrNotFound(id) + } + // Get the path + p, err := r.getPath(ctx, id) + if err != nil { + return err + } + // Remove the file + return r.fs.Remove(ctx, p) +} + +func (r *GenericFilesystemStorage) List(ctx context.Context, gk core.GroupKind, filterNs string) ([]core.ObjectKey, error) { + // Get namespacing info + namespaced, err := r.isNamespaced(gk) + if err != nil { + return nil, err + } + + if !namespaced { + // Make sure we don't have invalid input + if len(filterNs) != 0 { + return nil, errors.New("must not specify namespace filter for non-namespaced resource") + } + // Return the non-namespaced ObjectKeys from the FileFinder + return r.fileFinder.ListObjectKeys(ctx, r.fs, gk, "") + } + + // If filterNs is given, only search the given namespace + var namespaces []string + if len(filterNs) != 0 { + namespaces = []string{filterNs} + } else { + // Otherwise, list and loop all namespaces available for this GroupKind + namespaces, 
err = r.fileFinder.ListNamespaces(ctx, r.fs, gk) + if err != nil { + return nil, err + } + } + + // List keys for each namespace, and add to the keys slice + keys := []core.ObjectKey{} + for _, namespace := range namespaces { + newKeys, err := r.fileFinder.ListObjectKeys(ctx, r.fs, gk, namespace) + if err != nil { + return nil, err + } + keys = append(keys, newKeys...) + } + return keys, nil +} + +func (r *GenericFilesystemStorage) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { + // Get namespacing info + namespaced, err := r.isNamespaced(id.GroupKind()) + if err != nil { + return "", err + } + // Get the path + return r.fileFinder.ObjectPath(ctx, r.fs, id, namespaced) +} + +func (r *GenericFilesystemStorage) isNamespaced(gk core.GroupKind) (bool, error) { + return r.namespacer.IsNamespaced(gk) +} + +func checksumFromFileInfo(fi os.FileInfo) string { + return strconv.FormatInt(fi.ModTime().UnixNano(), 10) +} diff --git a/pkg/storage/raw/rawstorage_options.go b/pkg/storage/raw/rawstorage_options.go new file mode 100644 index 00000000..7cb59278 --- /dev/null +++ b/pkg/storage/raw/rawstorage_options.go @@ -0,0 +1,33 @@ +package raw + +import ( + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +type GenericFilesystemStorageOption interface { + ApplyToGenericFilesystemStorage(*GenericFilesystemStorageOptions) +} + +var _ GenericFilesystemStorageOption = &GenericFilesystemStorageOptions{} + +// GenericFilesystemStorageOptions specifies optional options for +// NewGenericFilesystemStorage. +type GenericFilesystemStorageOptions struct { + // AferoContext specifies a filesystem abstraction implementation. + // Default: An implementation scoped under the given root directory, + // operating on the local disk. 
+ AferoContext core.AferoContext +} + +func (o *GenericFilesystemStorageOptions) ApplyToGenericFilesystemStorage(target *GenericFilesystemStorageOptions) { + if o.AferoContext != nil { + target.AferoContext = o.AferoContext + } +} + +func (o *GenericFilesystemStorageOptions) ApplyOptions(opts []GenericFilesystemStorageOption) *GenericFilesystemStorageOptions { + for _, opt := range opts { + opt.ApplyToGenericFilesystemStorage(o) + } + return o +} diff --git a/pkg/storage/rawstorage.go b/pkg/storage/rawstorage.go deleted file mode 100644 index aa8ab642..00000000 --- a/pkg/storage/rawstorage.go +++ /dev/null @@ -1,303 +0,0 @@ -package storage - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "strings" - - "github.com/weaveworks/libgitops/pkg/serializer" - "github.com/weaveworks/libgitops/pkg/util" -) - -// RawStorage is a Key-indexed low-level interface to -// store byte-encoded Objects (resources) in non-volatile -// memory. -// TODO: Add thread-safety so it is not possible to issue a Write() or Delete() -// at the same time as any other read operation. -type RawStorage interface { - // Read returns a resource's content based on key. - // If the resource does not exist, it returns ErrNotFound. - Read(ctx context.Context, key ObjectKey) ([]byte, error) - // Exists checks if the resource indicated by key exists. - Exists(ctx context.Context, key ObjectKey) bool - // Write writes the given content to the resource indicated by key. - // Error returns are implementation-specific. - Write(ctx context.Context, key ObjectKey, content []byte) error - // Delete deletes the resource indicated by key. - // If the resource does not exist, it returns ErrNotFound. - Delete(ctx context.Context, key ObjectKey) error - // List returns all matching object keys based on the given KindKey. - List(ctx context.Context, key KindKey) ([]ObjectKey, error) - // Checksum returns a string checksum for the resource indicated by key. 
- // If the resource does not exist, it returns ErrNotFound. - Checksum(ctx context.Context, key ObjectKey) (string, error) - // ContentType returns the content type of the contents of the resource indicated by key. - ContentType(ctx context.Context, key ObjectKey) serializer.ContentType - - // TODO: A Stat() command instead of Exists/Checksum/ContentType? - - // WatchDir returns the path for Watchers to watch changes in. - WatchDir() string - // GetKey retrieves the Key containing the virtual path based - // on the given physical file path returned by a Watcher. - // TODO: Make this a separate interface - GetKey(path string) (ObjectKey, error) - - // Namespacer gives access to the namespacer that is used - Namespacer() Namespacer -} - -func NewGenericRawStorage(dir string, ct serializer.ContentType, namespacer Namespacer, opts ...GenericRawStorageOption) RawStorage { - if len(dir) == 0 { - panic("NewGenericRawStorage: dir is mandatory") - } - ext := extForContentType(ct) - if ext == "" { - panic("NewGenericRawStorage: Invalid content type") - } - if namespacer == nil { - panic("NewGenericRawStorage: namespacer is mandatory") - } - o := (&GenericRawStorageOptions{}).ApplyOptions(opts) - return &GenericRawStorage{ - dir: dir, - ct: ct, - namespacer: namespacer, - opts: *o, - ext: ext, - } -} - -// GenericRawStorage is a rawstorage which stores objects as JSON files on disk, -// in either of the forms: -// ////. -// ///. -// The GenericRawStorage only supports one GroupVersion at a time, and will error if given -// any other resources -type GenericRawStorage struct { - dir string - ct serializer.ContentType - ext string - namespacer Namespacer - opts GenericRawStorageOptions -} - -func (r *GenericRawStorage) keyPath(key ObjectKey) string { - // // - paths := []string{r.kindKeyPath(key.Kind())} - if r.isNamespaced(key.Kind()) { - // .// - paths = append(paths, key.NamespacedName().Namespace) - } - if r.opts.SubDirectoryFileName == nil { - // ./. 
- paths = append(paths, key.NamespacedName().Name+r.ext) - } else { - // .//. - paths = append(paths, key.NamespacedName().Name, *r.opts.SubDirectoryFileName+r.ext) - } - - return filepath.Join(paths...) -} - -func (r *GenericRawStorage) Namespacer() Namespacer { - return r.namespacer -} - -func (r *GenericRawStorage) isNamespaced(gvk KindKey) bool { - namespaced, err := r.namespacer.IsNamespaced(gvk.GroupKind()) - if err != nil { - panic(err) // TODO: handle this better - } - return namespaced -} - -func (r *GenericRawStorage) kindKeyPath(gvk KindKey) string { - if r.opts.DisableGroupDirectory != nil && *r.opts.DisableGroupDirectory { - // /// - return filepath.Join(r.dir, gvk.Kind) - } - // //// - return filepath.Join(r.dir, gvk.Group, gvk.Kind) -} - -func (r *GenericRawStorage) Read(ctx context.Context, key ObjectKey) ([]byte, error) { - // Check if the resource indicated by key exists - if !r.Exists(ctx, key) { - return nil, ErrNotFound - } - - return ioutil.ReadFile(r.keyPath(key)) -} - -func (r *GenericRawStorage) Exists(_ context.Context, key ObjectKey) bool { - return util.FileExists(r.keyPath(key)) -} - -func (r *GenericRawStorage) Write(ctx context.Context, key ObjectKey, content []byte) error { - file := r.keyPath(key) - - // Create the underlying directories if they do not exist already - if !r.Exists(ctx, key) { - if err := os.MkdirAll(filepath.Dir(file), 0755); err != nil { - return err - } - } - - return ioutil.WriteFile(file, content, 0644) -} - -func (r *GenericRawStorage) Delete(ctx context.Context, key ObjectKey) error { - // Check if the resource indicated by key exists - if !r.Exists(ctx, key) { - return ErrNotFound - } - - return os.RemoveAll(filepath.Dir(r.keyPath(key))) -} - -func (r *GenericRawStorage) List(_ context.Context, kind KindKey) ([]ObjectKey, error) { - // If the expected directory does not exist, just return an empty (nil) slice - dir := r.kindKeyPath(kind) - - var keys []ObjectKey - if !r.isNamespaced(kind) { - // Names are 
listed in kindKeyPath - names, err := r.listNamesInDir(dir) - if err != nil { - return nil, err - } - for _, name := range names { - keys = append(keys, NewObjectKey(kind, NamespacedName{Name: name})) - } - return keys, nil - } - - // Namespaces are listed in kindKeyPath - namespaces, err := readDir(dir) - if err != nil { - return nil, err - } - for _, namespace := range namespaces { - // Names are listed in / - names, err := r.listNamesInDir(filepath.Join(dir, namespace)) - if err != nil { - return nil, err - } - for _, name := range names { - keys = append(keys, NewObjectKey(kind, NamespacedName{Name: name, Namespace: namespace})) - } - } - - return keys, nil -} - -func (r *GenericRawStorage) listNamesInDir(dir string) ([]string, error) { - entries, err := readDir(dir) - if err != nil { - return nil, err - } - - names := make([]string, 0, len(entries)) - for _, entry := range entries { - // Loop through all names, and make sure they are sanitized .metadata.name's - // If r.opts.SubDirectoryFileName != nil, the file names already match .metadata.name - if r.opts.SubDirectoryFileName != nil { - // TODO: We could add even stronger validation here - // Make sure the file /<.metadata.name>/. actually exists. - // It could be that only the .metadata.name directory exists, but not the file underneath. - expectedPath := filepath.Join(dir, entry, *r.opts.SubDirectoryFileName+r.ext) - if util.FileExists(expectedPath) { - names = append(names, entry) - } - continue - } - - // Storage path is ./.. entry is "." - // Verify the extension is there and strip it from name. 
If ext isn't there, just continue - if !strings.HasSuffix(entry, r.ext) { - continue - } - names = append(names, strings.TrimSuffix(entry, r.ext)) - } - return names, nil -} - -// This returns the modification time as a UnixNano string -// If the file doesn't exist, return ErrNotFound -func (r *GenericRawStorage) Checksum(ctx context.Context, key ObjectKey) (string, error) { - // Check if the resource indicated by key exists - if !r.Exists(ctx, key) { - return "", ErrNotFound - } - - return checksumFromModTime(r.keyPath(key)) -} - -func (r *GenericRawStorage) ContentType(_ context.Context, _ ObjectKey) serializer.ContentType { - return r.ct -} - -func (r *GenericRawStorage) WatchDir() string { - return r.dir -} - -func (r *GenericRawStorage) GetKey(p string) (ObjectKey, error) { - /* TODO: Needs re-writing - - splitDir := strings.Split(filepath.Clean(r.opts.Directory), string(os.PathSeparator)) - splitPath := strings.Split(filepath.Clean(p), string(os.PathSeparator)) - - if len(splitPath) < len(splitDir)+2 { - return nil, fmt.Errorf("path not long enough: %s", p) - } - - for i := 0; i < len(splitDir); i++ { - if splitDir[i] != splitPath[i] { - return nil, fmt.Errorf("path has wrong base: %s", p) - } - } - kind := splitPath[len(splitDir)] - uid := splitPath[len(splitDir)+1] - gvk := schema.GroupVersionKind{ - Group: r.gv.Group, - Version: r.gv.Version, - Kind: kind, - } - - return NewObjectKey(NewKindKey(gvk), runtime.NewIdentifier(uid)), nil*/ - return nil, errors.New("not implemented") -} - -func checksumFromModTime(path string) (string, error) { - fi, err := os.Stat(path) - if err != nil { - return "", err - } - - return strconv.FormatInt(fi.ModTime().UnixNano(), 10), nil -} - -func readDir(dir string) ([]string, error) { - if ok, fi := util.PathExists(dir); !ok { - return nil, nil - } else if !fi.IsDir() { - return nil, fmt.Errorf("expected that %s is a directory", dir) - } - - // When we know that path is a directory, go ahead and read it - entries, err := 
ioutil.ReadDir(dir) - if err != nil { - return nil, err - } - fileNames := make([]string, 0, len(entries)) - for _, entry := range entries { - fileNames = append(fileNames, entry.Name()) - } - return fileNames, nil -} diff --git a/pkg/storage/rawstorage_options.go b/pkg/storage/rawstorage_options.go deleted file mode 100644 index 709dc94c..00000000 --- a/pkg/storage/rawstorage_options.go +++ /dev/null @@ -1,42 +0,0 @@ -package storage - -import "github.com/weaveworks/libgitops/pkg/util" - -type GenericRawStorageOption interface { - ApplyToGenericRawStorage(*GenericRawStorageOptions) -} - -type GenericRawStorageOptions struct { - // SubDirectoryFileName specifies an alternate storage path form of - // /////. - // if non-empty - // +optional - SubDirectoryFileName *string - // DisableGroupDirectory can be set to true in order to not include the group - // in the file path, so that the storage path becomes: - // ///. - // +optional - DisableGroupDirectory *bool -} - -func (o *GenericRawStorageOptions) ApplyToGenericRawStorage(target *GenericRawStorageOptions) { - if o.SubDirectoryFileName != nil { - target.SubDirectoryFileName = o.SubDirectoryFileName - } - if o.DisableGroupDirectory != nil { - target.DisableGroupDirectory = o.DisableGroupDirectory - } -} - -func (o *GenericRawStorageOptions) ApplyOptions(opts []GenericRawStorageOption) *GenericRawStorageOptions { - for _, opt := range opts { - opt.ApplyToGenericRawStorage(o) - } - return o -} - -type NoGroupDirectory bool - -func (d NoGroupDirectory) ApplyToGenericRawStorage(target *GenericRawStorageOptions) { - target.DisableGroupDirectory = util.BoolPtr(bool(d)) -} From d4149fe5486857b002ba6559c030ea48a0c1f580 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 19 Jan 2021 01:09:26 +0200 Subject: [PATCH 037/149] Rewrite GenericMappedRawStorage into GenericMappedFileFinder. 
--- pkg/storage/mappedrawstorage.go | 177 --------------------------- pkg/storage/raw/filefinder_mapped.go | 146 ++++++++++++++++++++++ pkg/storage/raw/mapped_cache.go | 104 ++++++++++++++++ 3 files changed, 250 insertions(+), 177 deletions(-) delete mode 100644 pkg/storage/mappedrawstorage.go create mode 100644 pkg/storage/raw/filefinder_mapped.go create mode 100644 pkg/storage/raw/mapped_cache.go diff --git a/pkg/storage/mappedrawstorage.go b/pkg/storage/mappedrawstorage.go deleted file mode 100644 index d41641ce..00000000 --- a/pkg/storage/mappedrawstorage.go +++ /dev/null @@ -1,177 +0,0 @@ -package storage - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - - log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/serializer" - "github.com/weaveworks/libgitops/pkg/util" -) - -var ( - // ErrNotTracked is returned when the requested resource wasn't found. - ErrNotTracked = fmt.Errorf("untracked object: %w", ErrNotFound) -) - -// MappedRawStorage is an interface for RawStorages which store their -// data in a flat/unordered directory format like manifest directories. -type MappedRawStorage interface { - RawStorage - - // AddMapping binds a Key's virtual path to a physical file path - AddMapping(key ObjectKey, path string) - // RemoveMapping removes the physical file - // path mapping matching the given Key - RemoveMapping(key ObjectKey) - - // SetMappings overwrites all known mappings - SetMappings(m map[ObjectKey]string) -} - -func NewGenericMappedRawStorage(dir string) MappedRawStorage { - return &GenericMappedRawStorage{ - dir: dir, - fileMappings: make(map[ObjectKey]string), - mux: &sync.Mutex{}, - } -} - -// GenericMappedRawStorage is the default implementation of a MappedRawStorage, -// it stores files in the given directory via a path translation map. 
-type GenericMappedRawStorage struct { - dir string - fileMappings map[ObjectKey]string - mux *sync.Mutex -} - -func (r *GenericMappedRawStorage) realPath(key ObjectKey) (string, error) { - r.mux.Lock() - path, ok := r.fileMappings[key] - r.mux.Unlock() - if !ok { - return "", fmt.Errorf("GenericMappedRawStorage: cannot resolve %q: %w", key, ErrNotTracked) - } - - return path, nil -} - -// If the file doesn't exist, returns ErrNotFound + ErrNotTracked. -func (r *GenericMappedRawStorage) Read(key ObjectKey) ([]byte, error) { - file, err := r.realPath(key) - if err != nil { - return nil, err - } - - return ioutil.ReadFile(file) -} - -func (r *GenericMappedRawStorage) Exists(key ObjectKey) bool { - file, err := r.realPath(key) - if err != nil { - return false - } - - return util.FileExists(file) -} - -func (r *GenericMappedRawStorage) Write(key ObjectKey, content []byte) error { - // GenericMappedRawStorage isn't going to generate files itself, - // only write if the file is already known - file, err := r.realPath(key) - if err != nil { - return err - } - - return ioutil.WriteFile(file, content, 0644) -} - -// If the file doesn't exist, returns ErrNotFound + ErrNotTracked. -func (r *GenericMappedRawStorage) Delete(key ObjectKey) (err error) { - file, err := r.realPath(key) - if err != nil { - return - } - - // GenericMappedRawStorage files can be deleted - // externally, check that the file exists first - if util.FileExists(file) { - err = os.Remove(file) - } - - if err == nil { - r.RemoveMapping(key) - } - - return -} - -func (r *GenericMappedRawStorage) List(kind KindKey) ([]ObjectKey, error) { - result := make([]ObjectKey, 0) - - for key := range r.fileMappings { - // Include objects with the same kind and group, ignore version mismatches - if key.EqualsGVK(kind, false) { - result = append(result, key) - } - } - - return result, nil -} - -// This returns the modification time as a UnixNano string. -// If the file doesn't exist, returns ErrNotFound + ErrNotTracked. 
-func (r *GenericMappedRawStorage) Checksum(key ObjectKey) (string, error) { - path, err := r.realPath(key) - if err != nil { - return "", err - } - - return checksumFromModTime(path) -} - -func (r *GenericMappedRawStorage) ContentType(key ObjectKey) (ct serializer.ContentType) { - if file, err := r.realPath(key); err == nil { - ct = ContentTypes[filepath.Ext(file)] // Retrieve the correct format based on the extension - } - - return -} - -func (r *GenericMappedRawStorage) WatchDir() string { - return r.dir -} - -func (r *GenericMappedRawStorage) GetKey(path string) (ObjectKey, error) { - for key, p := range r.fileMappings { - if p == path { - return key, nil - } - } - - return objectKey{}, fmt.Errorf("no mapping found for path %q", path) -} - -func (r *GenericMappedRawStorage) AddMapping(key ObjectKey, path string) { - log.Debugf("GenericMappedRawStorage: AddMapping: %q -> %q", key, path) - r.mux.Lock() - r.fileMappings[key] = path - r.mux.Unlock() -} - -func (r *GenericMappedRawStorage) RemoveMapping(key ObjectKey) { - log.Debugf("GenericMappedRawStorage: RemoveMapping: %q", key) - r.mux.Lock() - delete(r.fileMappings, key) - r.mux.Unlock() -} - -func (r *GenericMappedRawStorage) SetMappings(m map[ObjectKey]string) { - log.Debugf("GenericMappedRawStorage: SetMappings: %v", m) - r.mux.Lock() - r.fileMappings = m - r.mux.Unlock() -} diff --git a/pkg/storage/raw/filefinder_mapped.go b/pkg/storage/raw/filefinder_mapped.go new file mode 100644 index 00000000..a1807614 --- /dev/null +++ b/pkg/storage/raw/filefinder_mapped.go @@ -0,0 +1,146 @@ +package raw + +import ( + "context" + "errors" + "fmt" + + "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +var ( + // ErrNotTracked is returned when the requested resource wasn't found. + ErrNotTracked = errors.New("untracked object") +) + +// GenericMappedFileFinder implements MappedFileFinder. 
+var _ MappedFileFinder = &GenericMappedFileFinder{} + +// NewGenericMappedFileFinder creates a new instance of GenericMappedFileFinder, +// that implements the MappedFileFinder interface. The contentTyper is optional, +// by default core.DefaultContentTyper will be used. +func NewGenericMappedFileFinder(contentTyper core.ContentTyper) MappedFileFinder { + if contentTyper == nil { + contentTyper = core.DefaultContentTyper + } + return &GenericMappedFileFinder{ + contentTyper: contentTyper, + branch: &branchImpl{}, + } +} + +// GenericMappedFileFinder is a generic implementation of MappedFileFinder. +// It uses a ContentTyper to identify what content type a file uses. +// +// This implementation relies on that all information about what files exist +// is fed through SetMapping(s). If a file or ID is requested that doesn't +// exist in the internal cache, ErrNotTracked will be returned. +// +// Hence, this implementation does not at the moment support creating net-new +// Objects without someone calling SetMapping() first. +type GenericMappedFileFinder struct { + // Default: DefaultContentTyper + contentTyper core.ContentTyper + + branch branch +} + +// ObjectPath gets the file path relative to the root directory +func (f *GenericMappedFileFinder) ObjectPath(ctx context.Context, _ core.AferoContext, id core.UnversionedObjectID, namespaced bool) (string, error) { + ns := id.ObjectKey().Namespace + // TODO: can we do this better? + if namespaced && ns == "" { + return "", fmt.Errorf("invalid empty namespace for namespaced object") + } else if !namespaced && ns != "" { + return "", fmt.Errorf("invalid non-empty namespace for non-namespaced object") + } + cp, ok := f.GetMapping(ctx, id) + if !ok { + return "", ErrNotTracked + } + return cp.Path, nil +} + +// ObjectAt retrieves the ID containing the virtual path based +// on the given physical file path. 
+func (f *GenericMappedFileFinder) ObjectAt(ctx context.Context, _ core.AferoContext, path string) (core.UnversionedObjectID, error) { + // TODO: Add reverse tracking too? + for gk, gkIter := range f.branch.raw() { + for ns, nsIter := range gkIter.raw() { + for name, cp := range nsIter.raw() { + if cp.Path == path { + return core.NewUnversionedObjectID(gk, core.ObjectKey{Name: name, Namespace: ns}), nil + } + } + } + } + // TODO: Support "creation" of Objects easier, in a generic way through an interface, e.g. + // NewObjectPlacer? + return nil, ErrNotTracked +} + +// ListNamespaces lists the available namespaces for the given GroupKind +// This function shall only be called for namespaced objects, it is up to +// the caller to make sure they do not call this method for root-spaced +// objects; for that the behavior is undefined (but returning an error +// is recommended). +func (f *GenericMappedFileFinder) ListNamespaces(ctx context.Context, _ core.AferoContext, gk core.GroupKind) ([]string, error) { + m := f.branch.groupKind(gk).raw() + nsList := make([]string, 0, len(m)) + for ns := range m { + nsList = append(nsList, ns) + } + return nsList, nil +} + +// ListObjectKeys returns a list of names (with optionally, the namespace). +// For namespaced GroupKinds, the caller must provide a namespace, and for +// root-spaced GroupKinds, the caller must not. When namespaced, this function +// must only return object keys for that given namespace. 
+func (f *GenericMappedFileFinder) ListObjectKeys(ctx context.Context, _ core.AferoContext, gk core.GroupKind, namespace string) ([]core.ObjectKey, error) { + m := f.branch.groupKind(gk).namespace(namespace).raw() + names := make([]core.ObjectKey, 0, len(m)) + for name := range m { + names = append(names, core.ObjectKey{Name: name, Namespace: namespace}) + } + return names, nil +} + +func (f *GenericMappedFileFinder) ContentTypeForPath(ctx context.Context, fs core.AferoContext, path string) (serializer.ContentType, error) { + return f.contentTyper.ContentTypeForPath(ctx, fs, path) +} + +// GetMapping retrieves a mapping in the system +func (f *GenericMappedFileFinder) GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) { + cp, ok := f.branch. + groupKind(id.GroupKind()). + namespace(id.ObjectKey().Namespace). + name(id.ObjectKey().Name) + return cp, ok +} + +// SetMapping binds an ID's virtual path to a physical file path +func (f *GenericMappedFileFinder) SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) { + f.branch. + groupKind(id.GroupKind()). + namespace(id.ObjectKey().Namespace). + setName(id.ObjectKey().Name, checksumPath) +} + +// SetMappings replaces all mappings at once +func (f *GenericMappedFileFinder) SetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) { + f.branch = &branchImpl{} + for id, cp := range m { + f.SetMapping(ctx, id, cp) + } +} + +// DeleteMapping removes the physical file path mapping +// matching the given id +func (f *GenericMappedFileFinder) DeleteMapping(ctx context.Context, id core.UnversionedObjectID) { + f.branch. + groupKind(id.GroupKind()). + namespace(id.ObjectKey().Namespace). 
+ deleteName(id.ObjectKey().Name) +} diff --git a/pkg/storage/raw/mapped_cache.go b/pkg/storage/raw/mapped_cache.go new file mode 100644 index 00000000..28455f18 --- /dev/null +++ b/pkg/storage/raw/mapped_cache.go @@ -0,0 +1,104 @@ +package raw + +import "github.com/weaveworks/libgitops/pkg/storage/core" + +// This file contains a set of private interfaces and implementations +// that allows caching mappings between a core.UnversionedObjectID +// and a ChecksumPath. + +// TODO: rename this interface +type branch interface { + groupKind(core.GroupKind) groupKind + raw() map[core.GroupKind]groupKind +} + +type groupKind interface { + namespace(string) namespace + raw() map[string]namespace +} + +type namespace interface { + name(string) (ChecksumPath, bool) + setName(string, ChecksumPath) + deleteName(string) + raw() map[string]ChecksumPath +} + +type branchImpl struct { + m map[core.GroupKind]groupKind +} + +func (b *branchImpl) groupKind(gk core.GroupKind) groupKind { + if b.m == nil { + b.m = make(map[core.GroupKind]groupKind) + } + val, ok := b.m[gk] + if !ok { + val = &groupKindImpl{} + b.m[gk] = val + } + return val +} + +func (b *branchImpl) raw() map[core.GroupKind]groupKind { + if b.m == nil { + b.m = make(map[core.GroupKind]groupKind) + } + return b.m +} + +type groupKindImpl struct { + m map[string]namespace +} + +func (g *groupKindImpl) namespace(ns string) namespace { + if g.m == nil { + g.m = make(map[string]namespace) + } + val, ok := g.m[ns] + if !ok { + val = &namespaceImpl{} + g.m[ns] = val + } + return val +} + +func (g *groupKindImpl) raw() map[string]namespace { + if g.m == nil { + g.m = make(map[string]namespace) + } + return g.m +} + +type namespaceImpl struct { + m map[string]ChecksumPath +} + +func (n *namespaceImpl) name(name string) (ChecksumPath, bool) { + if n.m == nil { + n.m = make(map[string]ChecksumPath) + } + cp, ok := n.m[name] + return cp, ok +} + +func (n *namespaceImpl) setName(name string, cp ChecksumPath) { + if n.m == nil { + 
 n.m = make(map[string]ChecksumPath) + } + n.m[name] = cp +} + +func (n *namespaceImpl) deleteName(name string) { + if n.m == nil { + n.m = make(map[string]ChecksumPath) + } + delete(n.m, name) +} + +func (n *namespaceImpl) raw() map[string]ChecksumPath { + if n.m == nil { + n.m = make(map[string]ChecksumPath) + } + return n.m +} From 7acf4dcdb17fb9592a89b85ddb297254032a8bee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 19 Jan 2021 01:10:05 +0200 Subject: [PATCH 038/149] A small glue file for implementing the ObjectInfo interface. --- pkg/storage/raw/objectinfo.go | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 pkg/storage/raw/objectinfo.go diff --git a/pkg/storage/raw/objectinfo.go b/pkg/storage/raw/objectinfo.go new file mode 100644 index 00000000..51936159 --- /dev/null +++ b/pkg/storage/raw/objectinfo.go @@ -0,0 +1,20 @@ +package raw + +import ( + "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +var _ ObjectInfo = &objectInfo{} + +type objectInfo struct { + ct serializer.ContentType + checksum string + filepath string + id core.UnversionedObjectID +} + +func (o *objectInfo) ContentType() serializer.ContentType { return o.ct } +func (o *objectInfo) Checksum() string { return o.checksum } +func (o *objectInfo) Path() string { return o.filepath } +func (o *objectInfo) ID() core.UnversionedObjectID { return o.id } From fc339e1da1bbafbf69c285b0f49a111adf0ee269 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 19 Jan 2021 02:27:52 +0200 Subject: [PATCH 039/149] Add event types for both files and objects, and interfaces for unstructured storages, and watching for both file and object events at all levels. 
--- pkg/storage/raw/interfaces.go | 23 ++++++++ pkg/storage/raw/watch/events.go | 83 ++++++++++++++++++++++++++ pkg/storage/raw/watch/interfaces.go | 91 +++++++++++++++++++++++++++++ 3 files changed, 197 insertions(+) create mode 100644 pkg/storage/raw/watch/events.go create mode 100644 pkg/storage/raw/watch/interfaces.go diff --git a/pkg/storage/raw/interfaces.go b/pkg/storage/raw/interfaces.go index cea714ee..62f9d2c5 100644 --- a/pkg/storage/raw/interfaces.go +++ b/pkg/storage/raw/interfaces.go @@ -155,6 +155,9 @@ type FileFinder interface { // cache with mappings between UnversionedObjectID and a ChecksumPath. This allows // higher-order interfaces to manage Objects in files in an unorganized directory // (e.g. a Git repo). +// +// Multiple Objects in the same file, or multiple Objects with the +// same ID in multiple files are not supported. type MappedFileFinder interface { FileFinder @@ -169,6 +172,26 @@ type MappedFileFinder interface { DeleteMapping(ctx context.Context, id core.UnversionedObjectID) } +// UnstructuredStorage is a raw Storage interface that builds on top +// of FilesystemStorage. It uses an ObjectRecognizer to recognize +// otherwise unknown objects in unstructured files. +// The FilesystemStorage must use a MappedFileFinder underneath. +// +// Multiple Objects in the same file, or multiple Objects with the +// same ID in multiple files are not supported. +type UnstructuredStorage interface { + FilesystemStorage + + // Sync synchronizes the current state of the filesystem with the + // cached mappings in the MappedFileFinder. + Sync(ctx context.Context) error + + // ObjectRecognizer returns the underlying ObjectRecognizer used. + ObjectRecognizer() core.ObjectRecognizer + // MappedFileFinder returns the underlying MappedFileFinder used. + MappedFileFinder() MappedFileFinder +} + // ChecksumPath is a tuple of a given Checksum and relative file Path, // for use in MappedFileFinder. 
type ChecksumPath struct { diff --git a/pkg/storage/raw/watch/events.go b/pkg/storage/raw/watch/events.go new file mode 100644 index 00000000..7f60c087 --- /dev/null +++ b/pkg/storage/raw/watch/events.go @@ -0,0 +1,83 @@ +package watch + +import ( + "fmt" + + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +// FileEventType is an enum describing a change in a file's state +type FileEventType byte + +const ( + FileEventNone FileEventType = iota // 0 + FileEventModify // 1 + FileEventDelete // 2 + FileEventMove // 3 +) + +func (e FileEventType) String() string { + switch e { + case 0: + return "NONE" + case 1: + return "MODIFY" + case 2: + return "DELETE" + case 3: + return "MOVE" + } + + return "UNKNOWN" +} + +// FileEvent describes a file change of a certain kind at a certain +// (relative) path. Often emitted by FileEventsEmitter. +type FileEvent struct { + Path string + Type FileEventType +} + +// FileEventStream is a channel of FileEvents +type FileEventStream chan *FileEvent + +// ObjectEventType is an enum describing a change in an Object's state. +type ObjectEventType byte + +var _ fmt.Stringer = ObjectEventType(0) + +const ( + ObjectEventNone ObjectEventType = iota // 0 + ObjectEventCreate // 1 + ObjectEventUpdate // 2 + ObjectEventDelete // 3 + ObjectEventSync // 4 +) + +func (o ObjectEventType) String() string { + switch o { + case 0: + return "NONE" + case 1: + return "CREATE" + case 2: + return "UPDATE" + case 3: + return "DELETE" + case 4: + return "SYNC" + } + + // Should never happen + return "UNKNOWN" +} + +// ObjectEvent describes a change that has been observed +// for the given object with the given ID. 
+type ObjectEvent struct { + ID core.UnversionedObjectID + Type ObjectEventType +} + +// ObjectEventStream is a channel of ObjectEvents +type ObjectEventStream chan *ObjectEvent diff --git a/pkg/storage/raw/watch/interfaces.go b/pkg/storage/raw/watch/interfaces.go new file mode 100644 index 00000000..67bc335a --- /dev/null +++ b/pkg/storage/raw/watch/interfaces.go @@ -0,0 +1,91 @@ +package watch + +import ( + "context" + "errors" + "io" + + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/raw" +) + +var ( + // ErrTooManyWatches can happen when trying to register too many + // watching reciever channels to an event emitter. + ErrTooManyWatches = errors.New("too many watches already opened") +) + +// FileEventsEmitter is an interface that provides high-level inotify-like +// behaviour to consumers. It can be used e.g. by even higher-level +// interfaces like FilesystemEventStorage. +type FileEventsEmitter interface { + // WatchForFileEvents starts feeding FileEvents into the given "into" + // channel. The caller is responsible for setting a channel buffering + // limit large enough to not block normal operation. An error might + // be returned if a maximum amount of watches has been opened already, + // e.g. ErrTooManyWatches. + WatchForFileEvents(ctx context.Context, into FileEventStream) error + + // Suspend blocks the next event dispatch for this given path. Useful + // for not sending "your own" modification events into the + // FileEventStream that is listening. path is relative. + Suspend(ctx context.Context, path string) + + // PathExcluder returns the PathExcluder used internally + PathExcluder() core.PathExcluder + // ContentTyper returns the ContentTyper used internally + ContentTyper() core.ContentTyper + // Filesystem returns the filesystem abstraction used internally + Filesystem() core.AferoContext + + // Close closes the emitter gracefully. 
+ io.Closer +} + +// EventStorageCommon contains the methods that EventStorage adds to the +// to the normal raw.Storage. +type EventStorageCommon interface { + // WatchForObjectEvents starts feeding ObjectEvents into the given "into" + // channel. The caller is responsible for setting a channel buffering + // limit large enough to not block normal operation. An error might + // be returned if a maximum amount of watches has been opened already, + // e.g. ErrTooManyWatches. + WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error + + // Close closes the EventStorage and underlying resources gracefully. + io.Closer +} + +// FileEventStorageCommon is an extension to EventStorageCommon that +// also contains an underlying FileEventsEmitter. This is meant to be +// used in tandem with raw.FilesystemStorages. +type FileEventStorageCommon interface { + EventStorageCommon + + // FileEventsEmitter gets the FileEventsEmitter used internally. + FileEventsEmitter() FileEventsEmitter +} + +// EventStorage is the abstract combination of a normal raw.Storage, and +// a possiblility to listen for changes to objects as they change. +type EventStorage interface { + raw.Storage + EventStorageCommon +} + +// FilesystemEventStorage is the combination of a raw.FilesystemStorage, +// and the possibility to listen for object updates from a FileEventsEmitter. +type FilesystemEventStorage interface { + raw.FilesystemStorage + FileEventStorageCommon +} + +// UnstructuredEventStorage is an extension of raw.UnstructuredStorage, that +// adds the possiblility to listen for object updates from a FileEventsEmitter. +// +// When the Sync() function is run; the ObjectEvents that are emitted to the +// listening channels with have ObjectEvent.Type == ObjectEventSync. 
+type UnstructuredEventStorage interface { + raw.UnstructuredStorage + FileEventStorageCommon +} From 7ff01ea08b8ffde62910786780ed461be07465c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 19 Jan 2021 02:28:57 +0200 Subject: [PATCH 040/149] Move the directory traversal code from watcherutil to core; make it use the existing, generic interfaces in core. --- pkg/storage/core/dir_traversal.go | 37 +++++++++++++++++++ pkg/util/watcher/dir_traversal.go | 60 ------------------------------- 2 files changed, 37 insertions(+), 60 deletions(-) create mode 100644 pkg/storage/core/dir_traversal.go delete mode 100644 pkg/util/watcher/dir_traversal.go diff --git a/pkg/storage/core/dir_traversal.go b/pkg/storage/core/dir_traversal.go new file mode 100644 index 00000000..8e13ade4 --- /dev/null +++ b/pkg/storage/core/dir_traversal.go @@ -0,0 +1,37 @@ +package core + +import ( + "context" + "os" +) + +// ListValidFilesInFilesystem discovers files in the given AferoContext that has a +// ContentType that contentTyper recognizes, and is not a path that is excluded by +// pathExcluder. +func ListValidFilesInFilesystem(ctx context.Context, fs AferoContext, contentTyper ContentTyper, pathExcluder PathExcluder) (files []string, err error) { + err = fs.Walk(ctx, "", func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + // Only include valid files + if !info.IsDir() && IsValidFileInFilesystem(ctx, fs, contentTyper, pathExcluder, path) { + files = append(files, path) + } + return nil + }) + return +} + +// IsValidFileInFilesystem checks if file (a relative path) has a ContentType +// that contentTyper recognizes, and is not a path that is excluded by pathExcluder. 
+func IsValidFileInFilesystem(ctx context.Context, fs AferoContext, contentTyper ContentTyper, pathExcluder PathExcluder, file string) bool { + // return false if this path should be excluded + if pathExcluder.ShouldExcludePath(ctx, fs, file) { + return false + } + + // If the content type is valid for this path, err == nil => return true + _, err := contentTyper.ContentTypeForPath(ctx, fs, file) + return err == nil +} diff --git a/pkg/util/watcher/dir_traversal.go b/pkg/util/watcher/dir_traversal.go deleted file mode 100644 index 739ecf78..00000000 --- a/pkg/util/watcher/dir_traversal.go +++ /dev/null @@ -1,60 +0,0 @@ -package watcher - -import ( - "os" - "path/filepath" - "strings" -) - -func (w *FileWatcher) getFiles() ([]string, error) { - return WalkDirectoryForFiles(w.dir, w.opts.ValidExtensions, w.opts.ExcludeDirs) -} - -func (w *FileWatcher) validFile(path string) bool { - return isValidFile(path, w.opts.ValidExtensions, w.opts.ExcludeDirs) -} - -// WalkDirectoryForFiles discovers all subdirectories and -// returns a list of valid files in them -func WalkDirectoryForFiles(dir string, validExts, excludeDirs []string) (files []string, err error) { - err = filepath.Walk(dir, - func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if !info.IsDir() { - // Only include valid files - if isValidFile(path, validExts, excludeDirs) { - files = append(files, path) - } - } - - return nil - }) - - return -} - -// isValidFile is used to filter out all unsupported -// files based on if their extension is unknown or -// if their path contains an excluded directory -func isValidFile(path string, validExts, excludeDirs []string) bool { - parts := strings.Split(filepath.Clean(path), string(os.PathSeparator)) - ext := filepath.Ext(parts[len(parts)-1]) - for _, suffix := range validExts { - if ext == suffix { - return true - } - } - - for i := 0; i < len(parts)-1; i++ { - for _, exclude := range excludeDirs { - if parts[i] == exclude { - 
return false - } - } - } - - return false -} From 5b2fffd19b855dcf4f76a8178e8c7f23923ff84d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 19 Jan 2021 02:30:03 +0200 Subject: [PATCH 041/149] File and Object update event structs have now been moved to pkg/storage/raw/watch. --- pkg/storage/watch/update/event.go | 31 --------------- pkg/storage/watch/update/update.go | 28 ------------- pkg/util/watcher/event.go | 64 ------------------------------ 3 files changed, 123 deletions(-) delete mode 100644 pkg/storage/watch/update/event.go delete mode 100644 pkg/storage/watch/update/update.go delete mode 100644 pkg/util/watcher/event.go diff --git a/pkg/storage/watch/update/event.go b/pkg/storage/watch/update/event.go deleted file mode 100644 index 57367b7d..00000000 --- a/pkg/storage/watch/update/event.go +++ /dev/null @@ -1,31 +0,0 @@ -package update - -import "fmt" - -// ObjectEvent is an enum describing a change in an Object's state. -type ObjectEvent byte - -var _ fmt.Stringer = ObjectEvent(0) - -const ( - ObjectEventNone ObjectEvent = iota // 0 - ObjectEventCreate // 1 - ObjectEventModify // 2 - ObjectEventDelete // 3 -) - -func (o ObjectEvent) String() string { - switch o { - case 0: - return "NONE" - case 1: - return "CREATE" - case 2: - return "MODIFY" - case 3: - return "DELETE" - } - - // Should never happen - return "UNKNOWN" -} diff --git a/pkg/storage/watch/update/update.go b/pkg/storage/watch/update/update.go deleted file mode 100644 index 05ea7e0e..00000000 --- a/pkg/storage/watch/update/update.go +++ /dev/null @@ -1,28 +0,0 @@ -package update - -import ( - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/storage" -) - -// Update bundles an FileEvent with an -// APIType for Storage retrieval. -type Update struct { - Event ObjectEvent - PartialObject runtime.PartialObject - Storage storage.Storage -} - -// UpdateStream is a channel of updates. 
-type UpdateStream chan Update - -// EventStorage is a storage that exposes an UpdateStream. -type EventStorage interface { - storage.Storage - - // SetUpdateStream gives the EventStorage a channel to send events to. - // The caller is responsible for choosing a large enough buffer to avoid - // blocking the underlying EventStorage implementation unnecessarily. - // TODO: In the future maybe enable sending events to multiple listeners? - SetUpdateStream(UpdateStream) -} diff --git a/pkg/util/watcher/event.go b/pkg/util/watcher/event.go deleted file mode 100644 index 4da933d7..00000000 --- a/pkg/util/watcher/event.go +++ /dev/null @@ -1,64 +0,0 @@ -package watcher - -import ( - "fmt" - "strings" -) - -// FileEvent is an enum describing a change in a file's state -type FileEvent byte - -const ( - FileEventNone FileEvent = iota // 0 - FileEventModify // 1 - FileEventDelete // 2 - FileEventMove // 3 -) - -func (e FileEvent) String() string { - switch e { - case 0: - return "NONE" - case 1: - return "MODIFY" - case 2: - return "DELETE" - case 3: - return "MOVE" - } - - return "UNKNOWN" -} - -// FileEvents is a slice of FileEvents -type FileEvents []FileEvent - -var _ fmt.Stringer = FileEvents{} - -func (e FileEvents) String() string { - strs := make([]string, 0, len(e)) - for _, ev := range e { - strs = append(strs, ev.String()) - } - - return strings.Join(strs, ",") -} - -func (e FileEvents) Bytes() []byte { - b := make([]byte, 0, len(e)) - for _, event := range e { - b = append(b, byte(event)) - } - - return b -} - -// FileUpdates is a slice of FileUpdate pointers -type FileUpdates []*FileUpdate - -// FileUpdate is used by watchers to -// signal the state change of a file. -type FileUpdate struct { - Event FileEvent - Path string -} From e0481ff00d99e7db1e34f88ec4a006879a747149 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 19 Jan 2021 02:34:18 +0200 Subject: [PATCH 042/149] Move and refactor the filewatcher implementation. 
Make it more thread-safe in certain contexts, and make it implement the watch.FileEventsEmitter interface. --- .../raw/watch/inotify}/filewatcher.go | 311 +++++++++++------- .../raw/watch/inotify}/filewatcher_test.go | 43 ++- pkg/storage/raw/watch/inotify/options.go | 68 ++++ 3 files changed, 288 insertions(+), 134 deletions(-) rename pkg/{util/watcher => storage/raw/watch/inotify}/filewatcher.go (55%) rename pkg/{util/watcher => storage/raw/watch/inotify}/filewatcher_test.go (61%) create mode 100644 pkg/storage/raw/watch/inotify/options.go diff --git a/pkg/util/watcher/filewatcher.go b/pkg/storage/raw/watch/inotify/filewatcher.go similarity index 55% rename from pkg/util/watcher/filewatcher.go rename to pkg/storage/raw/watch/inotify/filewatcher.go index 67db3354..0056703d 100644 --- a/pkg/util/watcher/filewatcher.go +++ b/pkg/storage/raw/watch/inotify/filewatcher.go @@ -1,46 +1,28 @@ -package watcher +package inotify import ( + "context" "fmt" - "path" + "path/filepath" + gosync "sync" "time" "github.com/rjeczalik/notify" + "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" + "github.com/spf13/afero" + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/raw/watch" "github.com/weaveworks/libgitops/pkg/util/sync" "golang.org/x/sys/unix" + "k8s.io/apimachinery/pkg/util/sets" ) -const eventBuffer = 4096 // How many events and updates we can buffer before watching is interrupted var listenEvents = []notify.Event{notify.InDelete, notify.InCloseWrite, notify.InMovedFrom, notify.InMovedTo} -var eventMap = map[notify.Event]FileEvent{ - notify.InDelete: FileEventDelete, - notify.InCloseWrite: FileEventModify, -} - -// combinedEvent describes multiple events that should be concatenated into a single event -type combinedEvent struct { - input []notify.Event // input is a slice of events to match (in bytes, it speeds up the comparison) - output int // output is the event's index that should be returned, negative values 
equal nil -} - -func (c *combinedEvent) match(events notifyEvents) (notify.EventInfo, bool) { - if len(c.input) > len(events) { - return nil, false // Not enough events, cannot match - } - - for i := 0; i < len(c.input); i++ { - if events[i].Event() != c.input[i] { - return nil, false - } - } - - if c.output > 0 { - return events[c.output], true - } - - return nil, true +var eventMap = map[notify.Event]watch.FileEventType{ + notify.InDelete: watch.FileEventDelete, + notify.InCloseWrite: watch.FileEventModify, } // combinedEvents describes the event combinations to concatenate, @@ -54,82 +36,113 @@ var combinedEvents = []combinedEvent{ type notifyEvents []notify.EventInfo type eventStream chan notify.EventInfo -type FileUpdateStream chan *FileUpdate - -// Options specifies options for the FileWatcher -type Options struct { - // ExcludeDirs specifies what directories to not watch - ExcludeDirs []string - // BatchTimeout specifies the duration to wait after last event before dispatching grouped inotify events - BatchTimeout time.Duration - // ValidExtensions specifies what file extensions to look at - ValidExtensions []string -} -// DefaultOptions returns the default options -func DefaultOptions() Options { - return Options{ - ExcludeDirs: []string{".git"}, - BatchTimeout: 1 * time.Second, - ValidExtensions: []string{".yaml", ".yml", ".json"}, - } -} +// FileEvents is a slice of FileEvent pointers +type FileEvents []*watch.FileEvent // NewFileWatcher returns a list of files in the watched directory in // addition to the generated FileWatcher, it can be used to populate // MappedRawStorage fileMappings -func NewFileWatcher(dir string) (w *FileWatcher, files []string, err error) { - return NewFileWatcherWithOptions(dir, DefaultOptions()) -} +func NewFileWatcher(dir string, opts ...FileWatcherOption) (watch.FileEventsEmitter, error) { + o := defaultOptions().ApplyOptions(opts) -// NewFileWatcher returns a list of files in the watched directory in -// addition to the 
generated FileWatcher, it can be used to populate -// MappedRawStorage fileMappings -func NewFileWatcherWithOptions(dir string, opts Options) (w *FileWatcher, files []string, err error) { - w = &FileWatcher{ - dir: dir, - events: make(eventStream, eventBuffer), - updates: make(FileUpdateStream, eventBuffer), - batcher: sync.NewBatchWriter(opts.BatchTimeout), - opts: opts, + w := &FileWatcher{ + dir: dir, + + inbound: make(eventStream, int(o.EventBufferSize)), + // outbound is set by WatchForFileEvents + outboundMu: &gosync.Mutex{}, + + suspendFiles: sets.NewString(), + suspendFilesMu: &gosync.Mutex{}, + + // monitor and dispatcher set by WatchForFileEvents, guarded by outboundMu + + opts: *o, + // afero operates on the local disk, but is by convention scoped to the local + // directory that is being watched + afero: core.AferoWithoutContext(afero.NewBasePathFs(afero.NewOsFs(), dir)), + + batcher: sync.NewBatchWriter(o.BatchTimeout), } log.Tracef("FileWatcher: Starting recursive watch for %q", dir) - if err = notify.Watch(path.Join(dir, "..."), w.events, listenEvents...); err != nil { - notify.Stop(w.events) - } else if files, err = w.getFiles(); err == nil { - w.monitor = sync.RunMonitor(w.monitorFunc) - w.dispatcher = sync.RunMonitor(w.dispatchFunc) + if err := notify.Watch(filepath.Join(dir, "..."), w.inbound, listenEvents...); err != nil { + notify.Stop(w.inbound) + return nil, err } - return + return w, nil } +var _ watch.FileEventsEmitter = &FileWatcher{} + // FileWatcher recursively monitors changes in files in the given directory // and sends out events based on their state changes. Only files conforming // to validSuffix are monitored. The FileWatcher can be suspended for a single // event at a time to eliminate updates by WatchStorage causing a loop. 
type FileWatcher struct { - dir string - events eventStream - updates FileUpdateStream - suspendEvent FileEvent - monitor *sync.Monitor - dispatcher *sync.Monitor - opts Options + dir string + // channels + inbound eventStream + outbound watch.FileEventStream + outboundMu *gosync.Mutex + // new suspend logic + suspendFiles sets.String + suspendFilesMu *gosync.Mutex + // goroutines + monitor *sync.Monitor + dispatcher *sync.Monitor + opts FileWatcherOptions + // afero is always the OsFs type, which means it is passing the calls through + // directly to the local disk. It is used when talking to the given ContentTyper + // in order to identify various content types. + afero core.AferoContext // the batcher is used for properly sending many concurrent inotify events // as a group, after a specified timeout. This fixes the issue of one single // file operation being registered as many different inotify events batcher *sync.BatchWriter } +func (w *FileWatcher) ContentTyper() core.ContentTyper { + return w.opts.ContentTyper +} + +func (w *FileWatcher) PathExcluder() core.PathExcluder { + return w.opts.PathExcluder +} + +func (w *FileWatcher) Filesystem() core.AferoContext { + return w.afero +} + +func (w *FileWatcher) WatchForFileEvents(ctx context.Context, into watch.FileEventStream) error { + w.outboundMu.Lock() + defer w.outboundMu.Unlock() + // We don't support more than one listener + // TODO: maybe support many listeners in the future? 
+ if w.outbound != nil { + return fmt.Errorf("FileWatcher: not more than one watch supported: %w", watch.ErrTooManyWatches) + } + w.outbound = into + // Start the backing goroutines + w.monitor = sync.RunMonitor(w.monitorFunc) + w.dispatcher = sync.RunMonitor(w.dispatchFunc) + return nil // all ok +} + +func (w *FileWatcher) validFile(path string) bool { + ctx := context.Background() + return core.IsValidFileInFilesystem(ctx, w.afero, w.opts.ContentTyper, w.opts.PathExcluder, path) +} + func (w *FileWatcher) monitorFunc() { log.Debug("FileWatcher: Monitoring thread started") defer log.Debug("FileWatcher: Monitoring thread stopped") - defer close(w.updates) // Close the update stream after the FileWatcher has stopped + defer close(w.outbound) // Close the update stream after the FileWatcher has stopped for { - event, ok := <-w.events + event, ok := <-w.inbound if !ok { return } @@ -138,17 +151,6 @@ func (w *FileWatcher) monitorFunc() { continue // Skip directories } - if !w.validFile(event.Path()) { - continue // Skip invalid files - } - - updateEvent := convertEvent(event.Event()) - if w.suspendEvent > 0 && updateEvent == w.suspendEvent { - w.suspendEvent = 0 - log.Debugf("FileWatcher: Skipping suspended event %s for path: %q", updateEvent, event.Path()) - continue // Skip the suspended event - } - // Get any events registered for the specific file, and append the specified event var eventList notifyEvents if val, ok := w.batcher.Load(event.Path()); ok { @@ -186,49 +188,81 @@ func (w *FileWatcher) dispatchFunc() { } } -func (w *FileWatcher) sendUpdate(update *FileUpdate) { - log.Debugf("FileWatcher: Sending update: %s -> %q", update.Event, update.Path) - w.updates <- update -} +func (w *FileWatcher) sendUpdate(event *watch.FileEvent) { + // Get the relative path between the root directory and the changed file + relativePath, err := filepath.Rel(w.dir, event.Path) + if err != nil { + logrus.Errorf("FileWatcher: Error occurred when computing relative path between: %s 
and %s: %v", w.dir, event.Path, err) + return + } + // Replace the full path with the relative path for the signaling upstream + event.Path = relativePath + + if !w.validFile(event.Path) { + return // Skip invalid files + } -// GetFileUpdateStream gets the channel with FileUpdates -func (w *FileWatcher) GetFileUpdateStream() FileUpdateStream { - return w.updates + if w.shouldSuspendEvent(event.Path) { + log.Debugf("FileWatcher: Skipping suspended event %s for path: %q", event.Type, event.Path) + return // Skip the suspended event + } + + log.Debugf("FileWatcher: Sending update: %s -> %q", event.Type, event.Path) + w.outbound <- event } // Close closes active underlying resources -func (w *FileWatcher) Close() { - notify.Stop(w.events) +func (w *FileWatcher) Close() error { + notify.Stop(w.inbound) w.batcher.Close() - close(w.events) // Close the event stream + close(w.inbound) // Close the inbound event stream w.monitor.Wait() w.dispatcher.Wait() + return nil } -// Suspend enables a one-time suspend of the given event, -// the FileWatcher will skip the given event once -func (w *FileWatcher) Suspend(updateEvent FileEvent) { - w.suspendEvent = updateEvent +// Suspend enables a one-time suspend of the given path +// TODO: clarify how the path should be formatted +func (w *FileWatcher) Suspend(_ context.Context, path string) { + //w.suspendEvent = updateEvent + w.suspendFilesMu.Lock() + defer w.suspendFilesMu.Unlock() + w.suspendFiles.Insert(path) } -func convertEvent(event notify.Event) FileEvent { +// shouldSuspendEvent checks if an event for the given path +// should be suspended for one time. If it should, true will +// be returned, and the mapping will be removed next time. 
+func (w *FileWatcher) shouldSuspendEvent(path string) bool { + w.suspendFilesMu.Lock() + defer w.suspendFilesMu.Unlock() + // If the path should not be suspended, just return false and be done + if !w.suspendFiles.Has(path) { + return false + } + // Otherwise, remove it from the list and mark it as suspended + w.suspendFiles.Delete(path) + return true +} + +func convertEvent(event notify.Event) watch.FileEventType { if updateEvent, ok := eventMap[event]; ok { return updateEvent } - return FileEventNone + return watch.FileEventNone } -func convertUpdate(event notify.EventInfo) *FileUpdate { +func convertUpdate(event notify.EventInfo) *watch.FileEvent { fileEvent := convertEvent(event.Event()) - if fileEvent == FileEventNone { + if fileEvent == watch.FileEventNone { // This should never happen panic(fmt.Sprintf("invalid event for update conversion: %q", event.Event().String())) } - return &FileUpdate{ - Event: fileEvent, - Path: event.Path(), + return &watch.FileEvent{ + Path: event.Path(), + Type: fileEvent, } } @@ -260,42 +294,53 @@ func (m *moveCache) cookie() uint32 { // if only one is received, the file is moved in/out of a watched directory, which // is treated as a normal creation/deletion by this method. 
func (m *moveCache) incomplete() { - var event FileEvent + var evType watch.FileEventType switch m.event.Event() { case notify.InMovedFrom: - event = FileEventDelete + evType = watch.FileEventDelete case notify.InMovedTo: - event = FileEventModify + evType = watch.FileEventModify default: // This should never happen panic(fmt.Sprintf("moveCache: unrecognized event: %v", m.event.Event())) } log.Tracef("moveCache: Timer expired for %d, dispatching...", m.cookie()) - m.watcher.sendUpdate(&FileUpdate{event, m.event.Path()}) + m.watcher.sendUpdate(&watch.FileEvent{Path: m.event.Path(), Type: evType}) // Delete the cache after the timer has fired + moveCachesMu.Lock() delete(moveCaches, m.cookie()) + moveCachesMu.Unlock() } func (m *moveCache) cancel() { m.timer.Stop() + moveCachesMu.Lock() delete(moveCaches, m.cookie()) + moveCachesMu.Unlock() log.Tracef("moveCache: Dispatching cancelled for %d", m.cookie()) } -// moveCaches keeps track of active moves by cookie -var moveCaches = make(map[uint32]*moveCache) +var ( + // moveCaches keeps track of active moves by cookie + moveCaches = make(map[uint32]*moveCache) + moveCachesMu = &gosync.RWMutex{} +) // move processes InMovedFrom and InMovedTo events in any order // and dispatches FileUpdates when a move is detected -func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *FileUpdate) { +func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *watch.FileEvent) { cookie := ievent(event).Cookie + moveCachesMu.RLock() cache, ok := moveCaches[cookie] + moveCachesMu.RUnlock() if !ok { // The cookie is not cached, create a new cache object for it + moveCachesMu.Lock() moveCaches[cookie] = w.newMoveCache(event) + moveCachesMu.Unlock() return } @@ -305,8 +350,8 @@ func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *FileUpdate) { sourcePath, destPath = destPath, sourcePath fallthrough case notify.InMovedTo: - cache.cancel() // Cancel dispatching the cache's incomplete move - moveUpdate = 
&FileUpdate{FileEventMove, destPath} // Register an internal, complete move instead + cache.cancel() // Cancel dispatching the cache's incomplete move + moveUpdate = &watch.FileEvent{Path: destPath, Type: watch.FileEventMove} // Register an internal, complete move instead log.Tracef("FileWatcher: Detected move: %q -> %q", sourcePath, destPath) } @@ -315,8 +360,8 @@ func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *FileUpdate) { // concatenateEvents takes in a slice of events and concatenates // all events possible based on combinedEvents. It also manages -// file moving and conversion from notifyEvents to FileUpdates -func (w *FileWatcher) concatenateEvents(events notifyEvents) FileUpdates { +// file moving and conversion from notifyEvents to FileEvents +func (w *FileWatcher) concatenateEvents(events notifyEvents) FileEvents { for _, combinedEvent := range combinedEvents { // Test if the prefix of the given events matches combinedEvent.input if event, ok := combinedEvent.match(events); ok { @@ -332,7 +377,7 @@ func (w *FileWatcher) concatenateEvents(events notifyEvents) FileUpdates { } // Convert the events to updates - updates := make(FileUpdates, 0, len(events)) + updates := make(FileEvents, 0, len(events)) for _, event := range events { switch event.Event() { case notify.InMovedFrom, notify.InMovedTo: @@ -352,3 +397,27 @@ func (w *FileWatcher) concatenateEvents(events notifyEvents) FileUpdates { func ievent(event notify.EventInfo) *unix.InotifyEvent { return event.Sys().(*unix.InotifyEvent) } + +// combinedEvent describes multiple events that should be concatenated into a single event +type combinedEvent struct { + input []notify.Event // input is a slice of events to match (in bytes, it speeds up the comparison) + output int // output is the event's index that should be returned, negative values equal nil +} + +func (c *combinedEvent) match(events notifyEvents) (notify.EventInfo, bool) { + if len(c.input) > len(events) { + return nil, false // Not 
enough events, cannot match + } + + for i := 0; i < len(c.input); i++ { + if events[i].Event() != c.input[i] { + return nil, false + } + } + + if c.output > 0 { + return events[c.output], true + } + + return nil, true +} diff --git a/pkg/util/watcher/filewatcher_test.go b/pkg/storage/raw/watch/inotify/filewatcher_test.go similarity index 61% rename from pkg/util/watcher/filewatcher_test.go rename to pkg/storage/raw/watch/inotify/filewatcher_test.go index b80f9b26..620c139d 100644 --- a/pkg/util/watcher/filewatcher_test.go +++ b/pkg/storage/raw/watch/inotify/filewatcher_test.go @@ -1,9 +1,12 @@ -package watcher +package inotify import ( + "fmt" + "strings" "testing" "github.com/rjeczalik/notify" + "github.com/weaveworks/libgitops/pkg/storage/raw/watch" "golang.org/x/sys/unix" ) @@ -51,33 +54,33 @@ var testEvents = []notifyEvents{ }, } -var targets = []FileEvents{ +var targets = []FileEventTypes{ { - FileEventModify, + watch.FileEventModify, }, { - FileEventDelete, + watch.FileEventDelete, }, { - FileEventModify, - FileEventMove, - FileEventDelete, + watch.FileEventModify, + watch.FileEventMove, + watch.FileEventDelete, }, { - FileEventModify, + watch.FileEventModify, }, {}, } -func extractEvents(updates FileUpdates) (events FileEvents) { - for _, update := range updates { - events = append(events, update.Event) +func extractEventTypes(events FileEvents) (eventTypes FileEventTypes) { + for _, event := range events { + eventTypes = append(eventTypes, event.Type) } return } -func eventsEqual(a, b FileEvents) bool { +func eventsEqual(a, b FileEventTypes) bool { if len(a) != len(b) { return false } @@ -91,9 +94,23 @@ func eventsEqual(a, b FileEvents) bool { return true } +// FileEventTypes is a slice of FileEventType +type FileEventTypes []watch.FileEventType + +var _ fmt.Stringer = FileEventTypes{} + +func (e FileEventTypes) String() string { + strs := make([]string, 0, len(e)) + for _, ev := range e { + strs = append(strs, ev.String()) + } + + return strings.Join(strs, 
",") +} + func TestEventConcatenation(t *testing.T) { for i, e := range testEvents { - result := extractEvents((&FileWatcher{}).concatenateEvents(e)) + result := extractEventTypes((&FileWatcher{}).concatenateEvents(e)) if !eventsEqual(result, targets[i]) { t.Errorf("wrong concatenation result: %v != %v", result, targets[i]) } diff --git a/pkg/storage/raw/watch/inotify/options.go b/pkg/storage/raw/watch/inotify/options.go new file mode 100644 index 00000000..796c7bd1 --- /dev/null +++ b/pkg/storage/raw/watch/inotify/options.go @@ -0,0 +1,68 @@ +package inotify + +import ( + "time" + + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +// How many inotify events we can buffer before watching is interrupted +const DefaultEventBufferSize int32 = 4096 + +type FileWatcherOption interface { + ApplyToFileWatcher(*FileWatcherOptions) +} + +var _ FileWatcherOption = &FileWatcherOptions{} + +// Options specifies options for the FileWatcher +type FileWatcherOptions struct { + // PathExcluder specifies what files and directories to ignore + // Default: ExcludeGitDirectory{} + PathExcluder core.PathExcluder + // BatchTimeout specifies the duration to wait after last event + // before dispatching grouped inotify events + // Default: 1s + BatchTimeout time.Duration + // ContentTyper specifies what content types to recognize. + // All files for which ContentTyper returns a nil error will + // be watched. + // Default: core.DefaultContentTyper + ContentTyper core.ContentTyper + // EventBufferSize describes how many inotify events can be buffered + // before watching is interrupted/delayed. 
+ // Default: DefaultEventBufferSize + EventBufferSize int32 +} + +func (o *FileWatcherOptions) ApplyToFileWatcher(target *FileWatcherOptions) { + if o.PathExcluder != nil { + target.PathExcluder = o.PathExcluder + } + if o.BatchTimeout != 0 { + target.BatchTimeout = o.BatchTimeout + } + if o.ContentTyper != nil { + target.ContentTyper = o.ContentTyper + } + if o.EventBufferSize != 0 { + target.EventBufferSize = o.EventBufferSize + } +} + +func (o *FileWatcherOptions) ApplyOptions(opts []FileWatcherOption) *FileWatcherOptions { + for _, opt := range opts { + opt.ApplyToFileWatcher(o) + } + return o +} + +// defaultOptions returns the default options +func defaultOptions() *FileWatcherOptions { + return &FileWatcherOptions{ + PathExcluder: core.ExcludeGitDirectory{}, + BatchTimeout: 1 * time.Second, + ContentTyper: core.DefaultContentTyper, + EventBufferSize: DefaultEventBufferSize, + } +} From a88a1b1575b5032069c989c1d7d80b5ecab60400 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 19 Jan 2021 02:42:50 +0200 Subject: [PATCH 043/149] Refactor the WatchStorage implementation into an UnstructuredEventStorage implementation. 
--- pkg/storage/raw/watch/watch.go | 326 +++++++++++++++++++++++++++++++++ pkg/storage/watch/storage.go | 244 ------------------------ 2 files changed, 326 insertions(+), 244 deletions(-) create mode 100644 pkg/storage/raw/watch/watch.go delete mode 100644 pkg/storage/watch/storage.go diff --git a/pkg/storage/raw/watch/watch.go b/pkg/storage/raw/watch/watch.go new file mode 100644 index 00000000..a4ce3bd8 --- /dev/null +++ b/pkg/storage/raw/watch/watch.go @@ -0,0 +1,326 @@ +package watch + +import ( + "context" + "errors" + "fmt" + gosync "sync" + + "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/raw" + "github.com/weaveworks/libgitops/pkg/util/sync" +) + +const defaultEventsBufferSize = 4096 + +// NewGenericUnstructuredEventStorage is an extended Storage implementation, which +// together with the provided ObjectRecognizer and FileEventsEmitter listens for +// file events, keeps the mappings of the FilesystemStorage's MappedFileFinder +// in sync (s must use the mapped variant), and sends high-level ObjectEvents +// upstream. +// +// Note: This WatchStorage only works for one-frame files (i.e. only one YAML document +// per file is supported). +func NewGenericUnstructuredEventStorage( + s raw.FilesystemStorage, + recognizer core.ObjectRecognizer, + emitter FileEventsEmitter, + syncInBeginning bool, +) (UnstructuredEventStorage, error) { + // TODO: Possibly relax this requirement later, maybe it can also work for the SimpleFileFinder? 
+ fileFinder, ok := s.FileFinder().(raw.MappedFileFinder) + if !ok { + return nil, errors.New("the given FilesystemStorage must use a MappedFileFinder") + } + + return &GenericUnstructuredEventStorage{ + FilesystemStorage: s, + recognizer: recognizer, + fileFinder: fileFinder, + emitter: emitter, + + inbound: make(FileEventStream, defaultEventsBufferSize), + // outbound set by WatchForObjectEvents + outboundMu: &gosync.Mutex{}, + + // monitor set by WatchForObjectEvents, guarded by outboundMu + + syncInBeginning: syncInBeginning, + }, nil +} + +// GenericUnstructuredEventStorage is an extended raw.Storage implementation, which provides a watcher +// for watching changes in the directory managed by the embedded Storage's RawStorage. +// If the RawStorage is a MappedRawStorage instance, it's mappings will automatically +// be updated by the WatchStorage. Update events are sent to the given event stream. +// Note: This WatchStorage only works for one-frame files (i.e. only one YAML document +// per file is supported). 
+type GenericUnstructuredEventStorage struct { + raw.FilesystemStorage + // the recognizer recognizes files + recognizer core.ObjectRecognizer + // mapped file finder + fileFinder raw.MappedFileFinder + // the filesystem events emitter + emitter FileEventsEmitter + + // channels + inbound FileEventStream + outbound ObjectEventStream + outboundMu *gosync.Mutex + + // goroutine + monitor *sync.Monitor + + // opts + syncInBeginning bool +} + +func (s *GenericUnstructuredEventStorage) ObjectRecognizer() core.ObjectRecognizer { + return s.recognizer +} + +func (s *GenericUnstructuredEventStorage) FileEventsEmitter() FileEventsEmitter { + return s.emitter +} + +func (s *GenericUnstructuredEventStorage) MappedFileFinder() raw.MappedFileFinder { + return s.fileFinder +} + +func (s *GenericUnstructuredEventStorage) WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error { + s.outboundMu.Lock() + defer s.outboundMu.Unlock() + // We don't support more than one listener + // TODO: maybe support many listeners in the future? + if s.outbound != nil { + return fmt.Errorf("WatchStorage: not more than one watch supported: %w", ErrTooManyWatches) + } + // Hook up our inbound channel to the emitter, to make the pipeline functional + if err := s.emitter.WatchForFileEvents(ctx, s.inbound); err != nil { + return err + } + // Set outbound at this stage so Sync possibly can send events. + s.outbound = into + // Start the backing goroutines + s.monitor = sync.RunMonitor(s.monitorFunc) + + // Do a full sync in the beginning only if asked. 
Be aware that without running a Sync + // at all before events start happening, the reporting might not work as it should + if s.syncInBeginning { + if err := s.Sync(ctx); err != nil { + return err + } + } + return nil // all ok +} + +func (s *GenericUnstructuredEventStorage) Sync(ctx context.Context) error { + // List all valid files in the fs + files, err := core.ListValidFilesInFilesystem( + ctx, + s.emitter.Filesystem(), + s.emitter.ContentTyper(), + s.emitter.PathExcluder(), + ) + if err != nil { + return err + } + + // Send SYNC events for all files (and fill the mappings + // of the MappedRawStorage) before starting to monitor changes + for _, file := range files { + // TODO: when checksum support is added to setMapping, we can skip + // reading such files which already have an up-to-date checksum. + // TODO: Alternatively/also, we should support feeding an + // UnstructuredStorage, so that we can run its Sync() function instead + + content, err := s.Filesystem().ReadFile(ctx, file) + if err != nil { + logrus.Warnf("Ignoring %q: %v", file, err) + continue + } + + id, err := s.recognizer.ResolveObjectID(ctx, file, content) + if err != nil { + logrus.Warnf("Could not recognize object ID in %q: %v", file, err) + continue + } + + // Add a mapping between this object and path + s.setMapping(ctx, id, file) + // Send a special "sync" event for this ObjectID to the events channel + s.sendEvent(ObjectEventSync, id) + } + + return nil +} + +// Write writes the given content to the resource indicated by the ID. +// Error returns are implementation-specific. +func (s *GenericUnstructuredEventStorage) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error { + // Get the path + p, err := s.getPath(ctx, id) + if err != nil { + return err + } + // Suspend the write event + s.emitter.Suspend(ctx, p) + // Call the underlying raw.Storage + return s.FilesystemStorage.Write(ctx, id, content) +} + +// Delete deletes the resource indicated by the ID. 
+// If the resource does not exist, it returns ErrNotFound. +func (s *GenericUnstructuredEventStorage) Delete(ctx context.Context, id core.UnversionedObjectID) error { + // Get the path + p, err := s.getPath(ctx, id) + if err != nil { + return err + } + // Suspend the write event + s.emitter.Suspend(ctx, p) + // Call the underlying raw.Storage + return s.FilesystemStorage.Delete(ctx, id) +} + +func (s *GenericUnstructuredEventStorage) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { + // Get namespacing info + namespaced, err := s.Namespacer().IsNamespaced(id.GroupKind()) + if err != nil { + return "", err + } + // Get the path + return s.FileFinder().ObjectPath(ctx, s.Filesystem(), id, namespaced) +} + +func (s *GenericUnstructuredEventStorage) Close() error { + err := s.emitter.Close() + s.monitor.Wait() + return err +} + +func (s *GenericUnstructuredEventStorage) monitorFunc() { + logrus.Debug("WatchStorage: Monitoring thread started") + defer logrus.Debug("WatchStorage: Monitoring thread stopped") + + ctx := context.Background() + + for { + // TODO: handle context cancellations, i.e. ctx.Done() + event, ok := <-s.inbound + if !ok { + logrus.Error("WatchStorage: Fatal: Got non-ok response from watcher.GetFileEventStream()") + return + } + + logrus.Tracef("WatchStorage: Processing event: %s", event.Type) + + var err error + switch event.Type { + // FileEventModify is also sent for newly-created files + case FileEventModify, FileEventMove: + err = s.handleModifyMove(ctx, event) + case FileEventDelete: + err = s.handleDelete(ctx, event) + default: + err = fmt.Errorf("cannot handle update of type %v for path %q", event.Type, event.Path) + } + if err != nil { + logrus.Errorf("WatchStorage: %v", err) + } + } +} + +func (s *GenericUnstructuredEventStorage) handleDelete(ctx context.Context, event *FileEvent) error { + // The object is deleted, so we need to do a reverse-lookup of what kind of object + // was there earlier, based on the path. 
This assumes that the filefinder organizes + // the known objects in such a way that it is able to do the reverse-lookup. For + // mapped FileFinders, by this point the path should still be in the local cache, + // which should make us able to get the ID before deleted from the cache. + objectID, err := s.fileFinder.ObjectAt(ctx, s.Filesystem(), event.Path) + if err != nil { + return fmt.Errorf("failed to reverse lookup ID for deleted file %q: %v", event.Path, err) + } + + // Remove the mapping from the FileFinder cache for this ID as it's now deleted + s.deleteMapping(ctx, objectID) + // Send the delete event to the channel + s.sendEvent(ObjectEventDelete, objectID) + return nil +} + +func (s *GenericUnstructuredEventStorage) handleModifyMove(ctx context.Context, event *FileEvent) error { + // Read the content of this modified, moved or created file + content, err := s.Filesystem().ReadFile(ctx, event.Path) + if err != nil { + return fmt.Errorf("could not read %q: %v", event.Path, err) + } + + // Try to recognize the object + versionedID, err := s.recognizer.ResolveObjectID(ctx, event.Path, content) + if err != nil { + return fmt.Errorf("did not recognize object at path %q: %v", event.Path, err) + } + + // If the file was just moved around, just overwrite the earlier mapping + if event.Type == FileEventMove { + s.setMapping(ctx, versionedID, event.Path) + + // Internal move events are a no-op + return nil + } + + // Determine if this object already existed in the fileFinder's cache, + // in order to find out if the object was created or modified (default). + // TODO: In the future, maybe support multiple files pointing to the same + // ObjectID? Case in point here is e.g. a Modify event for a known path that + // changes the underlying ObjectID. 
+ objectEvent := ObjectEventUpdate + // Set the mapping if it didn't exist before; assume this is a Create event + if _, ok := s.fileFinder.GetMapping(ctx, versionedID); !ok { + // Add a mapping between this object and path. + s.setMapping(ctx, versionedID, event.Path) + + // This is what actually determines if an Object is created, + // so update the event to update.ObjectEventCreate here + objectEvent = ObjectEventCreate + } + // Send the event to the channel + s.sendEvent(objectEvent, versionedID) + return nil +} + +func (s *GenericUnstructuredEventStorage) sendEvent(event ObjectEventType, id core.UnversionedObjectID) { + logrus.Tracef("GenericUnstructuredEventStorage: Sending event: %v", event) + s.outbound <- &ObjectEvent{ + ID: id, + Type: event, + } +} + +// setMapping registers a mapping between the given object and the specified path, if raw is a +// MappedRawStorage. If a given mapping already exists between this object and some path, it +// will be overridden with the specified new path +func (s *GenericUnstructuredEventStorage) setMapping(ctx context.Context, id core.UnversionedObjectID, path string) { + /*oi, err := s.FilesystemStorage.Stat(ctx, id) + if err != nil { + logrus.Errorf("WatchStorage: Got error when Stat-ing object with id %v: %v", id, err) + return + }*/ + + // TODO: Support working with other MappedFileFinder users simultaneously, and start populating + // the checksum accordingly, by using Stat like above, but taking into account that there might + // not be a previous mapping, in which case one needs to create that first. 
+ + s.fileFinder.SetMapping(ctx, id, raw.ChecksumPath{ + Path: path, + //Checksum: oi.Checksum(), + }) +} + +// deleteMapping removes a mapping a file that doesn't exist +func (s *GenericUnstructuredEventStorage) deleteMapping(ctx context.Context, id core.UnversionedObjectID) { + s.fileFinder.DeleteMapping(ctx, id) +} diff --git a/pkg/storage/watch/storage.go b/pkg/storage/watch/storage.go deleted file mode 100644 index f3d7b0bb..00000000 --- a/pkg/storage/watch/storage.go +++ /dev/null @@ -1,244 +0,0 @@ -package watch - -import ( - "io/ioutil" - - log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/serializer" - "github.com/weaveworks/libgitops/pkg/storage" - "github.com/weaveworks/libgitops/pkg/storage/watch/update" - "github.com/weaveworks/libgitops/pkg/util/sync" - "github.com/weaveworks/libgitops/pkg/util/watcher" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -// NewManifestStorage returns a pre-configured GenericWatchStorage backed by a storage.GenericStorage, -// and a GenericMappedRawStorage for the given manifestDir and Serializer. This should be sufficient -// for most users that want to watch changes in a directory with manifests. -func NewManifestStorage(manifestDir string, ser serializer.Serializer) (update.EventStorage, error) { - return NewGenericWatchStorage( - storage.NewGenericStorage( - storage.NewGenericMappedRawStorage(manifestDir), - ser, - []runtime.IdentifierFactory{runtime.Metav1NameIdentifier}, - ), - ) -} - -// NewGenericWatchStorage is an extended Storage implementation, which provides a watcher -// for watching changes in the directory managed by the embedded Storage's RawStorage. -// If the RawStorage is a MappedRawStorage instance, it's mappings will automatically -// be updated by the WatchStorage. Update events are sent to the given event stream. -// Note: This WatchStorage only works for one-frame files (i.e. 
only one YAML document -// per file is supported). -func NewGenericWatchStorage(s storage.Storage) (update.EventStorage, error) { - ws := &GenericWatchStorage{ - Storage: s, - } - - var err error - var files []string - if ws.watcher, files, err = watcher.NewFileWatcher(s.RawStorage().WatchDir()); err != nil { - return nil, err - } - - ws.monitor = sync.RunMonitor(func() { - ws.monitorFunc(ws.RawStorage(), files) // Offload the file registration to the goroutine - }) - - return ws, nil -} - -// EventDeleteObjectName is used as the name of an object sent to the -// GenericWatchStorage's event stream when the the object has been deleted -const EventDeleteObjectName = "" - -// GenericWatchStorage implements the WatchStorage interface -type GenericWatchStorage struct { - storage.Storage - watcher *watcher.FileWatcher - events update.UpdateStream - monitor *sync.Monitor -} - -var _ update.EventStorage = &GenericWatchStorage{} - -// Suspend modify events during Create -func (s *GenericWatchStorage) Create(obj runtime.Object) error { - s.watcher.Suspend(watcher.FileEventModify) - return s.Storage.Create(obj) -} - -// Suspend modify events during Update -func (s *GenericWatchStorage) Update(obj runtime.Object) error { - s.watcher.Suspend(watcher.FileEventModify) - return s.Storage.Update(obj) -} - -// Suspend modify events during Patch -func (s *GenericWatchStorage) Patch(key storage.ObjectKey, patch []byte) error { - s.watcher.Suspend(watcher.FileEventModify) - return s.Storage.Patch(key, patch) -} - -// Suspend delete events during Delete -func (s *GenericWatchStorage) Delete(key storage.ObjectKey) error { - s.watcher.Suspend(watcher.FileEventDelete) - return s.Storage.Delete(key) -} - -func (s *GenericWatchStorage) SetUpdateStream(eventStream update.UpdateStream) { - s.events = eventStream -} - -func (s *GenericWatchStorage) Close() error { - s.watcher.Close() - s.monitor.Wait() - return nil -} - -func (s *GenericWatchStorage) monitorFunc(raw storage.RawStorage, files 
[]string) { - log.Debug("GenericWatchStorage: Monitoring thread started") - defer log.Debug("GenericWatchStorage: Monitoring thread stopped") - var content []byte - - // Send a MODIFY event for all files (and fill the mappings - // of the MappedRawStorage) before starting to monitor changes - for _, file := range files { - content, err := ioutil.ReadFile(file) - if err != nil { - log.Warnf("Ignoring %q: %v", file, err) - continue - } - - obj, err := runtime.NewPartialObject(content) - if err != nil { - log.Warnf("Ignoring %q: %v", file, err) - continue - } - - // Add a mapping between this object and path - s.addMapping(raw, obj, file) - // Send the event to the events channel - s.sendEvent(update.ObjectEventModify, obj) - } - - for { - if event, ok := <-s.watcher.GetFileUpdateStream(); ok { - var partObj runtime.PartialObject - var err error - - var objectEvent update.ObjectEvent - switch event.Event { - case watcher.FileEventModify: - objectEvent = update.ObjectEventModify - case watcher.FileEventDelete: - objectEvent = update.ObjectEventDelete - } - - log.Tracef("GenericWatchStorage: Processing event: %s", event.Event) - if event.Event == watcher.FileEventDelete { - key, err := raw.GetKey(event.Path) - if err != nil { - log.Warnf("Failed to retrieve data for %q: %v", event.Path, err) - continue - } - - // This creates a "fake" Object from the key to be used for - // deletion, as the original has already been removed from disk - apiVersion, kind := key.GetGVK().ToAPIVersionAndKind() - partObj = &runtime.PartialObjectImpl{ - TypeMeta: metav1.TypeMeta{ - APIVersion: apiVersion, - Kind: kind, - }, - ObjectMeta: metav1.ObjectMeta{ - Name: EventDeleteObjectName, - // TODO: This doesn't take into account where e.g. 
the identifier is "{namespace}/{name}" - UID: types.UID(key.GetIdentifier()), - }, - } - // remove the mapping for this key as it's now deleted - s.removeMapping(raw, key) - } else { - content, err = ioutil.ReadFile(event.Path) - if err != nil { - log.Warnf("Ignoring %q: %v", event.Path, err) - continue - } - - if partObj, err = runtime.NewPartialObject(content); err != nil { - log.Warnf("Ignoring %q: %v", event.Path, err) - continue - } - - if event.Event == watcher.FileEventMove { - // Update the mappings for the moved file (AddMapping overwrites) - s.addMapping(raw, partObj, event.Path) - - // Internal move events are a no-op - continue - } - - // This is based on the key's existence instead of watcher.EventCreate, - // as Objects can get updated (via watcher.FileEventModify) to be conformant - if _, err = raw.GetKey(event.Path); err != nil { - // Add a mapping between this object and path - s.addMapping(raw, partObj, event.Path) - - // This is what actually determines if an Object is created, - // so update the event to update.ObjectEventCreate here - objectEvent = update.ObjectEventCreate - } - } - - // Send the objectEvent to the events channel - if objectEvent != update.ObjectEventNone { - s.sendEvent(objectEvent, partObj) - } - } else { - return - } - } -} - -func (s *GenericWatchStorage) sendEvent(event update.ObjectEvent, partObj runtime.PartialObject) { - if s.events != nil { - log.Tracef("GenericWatchStorage: Sending event: %v", event) - s.events <- update.Update{ - Event: event, - PartialObject: partObj, - Storage: s, - } - } -} - -// addMapping registers a mapping between the given object and the specified path, if raw is a -// MappedRawStorage. 
If a given mapping already exists between this object and some path, it -// will be overridden with the specified new path -func (s *GenericWatchStorage) addMapping(raw storage.RawStorage, obj runtime.Object, file string) { - mapped, ok := raw.(storage.MappedRawStorage) - if !ok { - return - } - - // Let the embedded storage decide using its identifiers how to - key, err := s.Storage.ObjectKeyFor(obj) - if err != nil { - log.Errorf("couldn't get object key for: gvk=%s, uid=%s, name=%s", obj.GetObjectKind().GroupVersionKind(), obj.GetUID(), obj.GetName()) - } - - mapped.AddMapping(key, file) -} - -// removeMapping removes a mapping a file that doesn't exist -func (s *GenericWatchStorage) removeMapping(raw storage.RawStorage, key storage.ObjectKey) { - mapped, ok := raw.(storage.MappedRawStorage) - if !ok { - return - } - - mapped.RemoveMapping(key) -} From a3fa3dc6340c0fc94ad103aeb630b2e205896809 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 19 Jan 2021 02:45:49 +0200 Subject: [PATCH 044/149] Create a "manifest" helper constructor. --- pkg/storage/raw/watch/manifest/manifest.go | 33 ++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 pkg/storage/raw/watch/manifest/manifest.go diff --git a/pkg/storage/raw/watch/manifest/manifest.go b/pkg/storage/raw/watch/manifest/manifest.go new file mode 100644 index 00000000..b9728c78 --- /dev/null +++ b/pkg/storage/raw/watch/manifest/manifest.go @@ -0,0 +1,33 @@ +package manifest + +import ( + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/raw" + "github.com/weaveworks/libgitops/pkg/storage/raw/watch" + "github.com/weaveworks/libgitops/pkg/storage/raw/watch/inotify" +) + +// NewManifestStorage is a high-level constructor for a generic +// MappedFileFinder and FilesystemStorage, together with a +// inotify FileWatcher; all combined into an UnstructuredEventStorage. 
+func NewManifestStorage( + dir string, + contentTyper core.ContentTyper, + namespacer core.Namespacer, + recognizer core.ObjectRecognizer, + pathExcluder core.PathExcluder, +) (watch.UnstructuredEventStorage, error) { + fileFinder := raw.NewGenericMappedFileFinder(contentTyper) + fsRaw, err := raw.NewGenericFilesystemStorage(dir, fileFinder, namespacer) + if err != nil { + return nil, err + } + emitter, err := inotify.NewFileWatcher(dir, &inotify.FileWatcherOptions{ + ContentTyper: contentTyper, + PathExcluder: pathExcluder, + }) + if err != nil { + return nil, err + } + return watch.NewGenericUnstructuredEventStorage(fsRaw, recognizer, emitter, true) +} From 4a2e70a3e57aad26d6c6bae257c6d36e0a9ed6ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 19 Jan 2021 02:47:08 +0200 Subject: [PATCH 045/149] Snapshot latest modifications to pkg/storage --- pkg/storage/storage.go | 164 +++++++++++++++++++---------------------- pkg/storage/utils.go | 23 ++++++ 2 files changed, 100 insertions(+), 87 deletions(-) create mode 100644 pkg/storage/utils.go diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index 2e293416..e8133fca 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -12,6 +12,8 @@ import ( "github.com/weaveworks/libgitops/pkg/filter" "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/raw" patchutil "github.com/weaveworks/libgitops/pkg/util/patch" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,16 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -type NewObjectFunc func() (Object, error) - var ( - // TODO: Return the same errors as k8s does - // ErrAmbiguousFind is returned when the user requested one object from a List+Filter process. 
- ErrAmbiguousFind = errors.New("two or more results were aquired when one was expected") - // ErrNotFound is returned when the requested resource wasn't found. - ErrNotFound = errors.New("resource not found") - // ErrAlreadyExists is returned when when WriteStorage.Create is called for an already stored object. - ErrAlreadyExists = errors.New("resource already exists") // ErrCannotSaveMetadata is returned if the user tries to save metadata-only objects ErrCannotSaveMetadata = errors.New("cannot save (Create|Update|Patch) *metav1.PartialObjectMetadata") // ErrNameRequired is returned when .metadata.name is unset @@ -47,17 +40,6 @@ const ( var v1GroupKind = schema.GroupVersion{Group: "", Version: "v1"} -type ObjectID interface { - GroupVersionKind() schema.GroupVersionKind - GetName() string - GetNamespace() string - GetLabels() map[string]string -} - -func foo() { - var _ ObjectID = &metav1.PartialObjectMetadata{} -} - type CommonStorage interface { // // Access to underlying Resources. @@ -65,7 +47,7 @@ type CommonStorage interface { // RawStorage returns the RawStorage instance backing this Storage // It is expected that RawStorage only operates on one "frame" at a time in its Read/Write operations. - RawStorage() RawStorage + RawStorage() raw.Storage // Serializer returns the serializer Serializer() serializer.Serializer @@ -74,6 +56,7 @@ type CommonStorage interface { // // Close closes all underlying resources (e.g. goroutines) used; before the application exits + // TODO: Maybe this instead should apply to raw.Storage's now? 
Close() error } @@ -104,40 +87,45 @@ type Storage interface { } // NewGenericStorage constructs a new Storage -func NewGenericStorage(rawStorage RawStorage, serializer serializer.Serializer, enforcer NamespaceEnforcer) Storage { - return &GenericStorage{rawStorage, serializer, enforcer} +func NewGenericStorage(rawStorage raw.Storage, serializer serializer.Serializer, enforcer core.NamespaceEnforcer) Storage { + return &storage{rawStorage, serializer, enforcer} } -// GenericStorage implements the Storage interface -type GenericStorage struct { - raw RawStorage +// storage implements the Storage interface +type storage struct { + raw raw.Storage serializer serializer.Serializer - enforcer NamespaceEnforcer + enforcer core.NamespaceEnforcer } -var _ Storage = &GenericStorage{} +var _ Storage = &storage{} -func (s *GenericStorage) Serializer() serializer.Serializer { +func (s *storage) Serializer() serializer.Serializer { return s.serializer } // Get returns a new Object for the resource at the specified kind/uid path, based on the file content. // In order to only extract the metadata of this object, pass in a *metav1.PartialObjectMetadata -func (s *GenericStorage) Get(ctx context.Context, name NamespacedName, obj Object) error { +func (s *storage) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { gvk, err := serializer.GVKForObject(s.serializer.Scheme(), obj) if err != nil { return err } - key := NewObjectKey(gvk, name) - content, err := s.raw.Read(ctx, key) + id := core.NewObjectID(gvk, key) + // TODO: Sanitize id here: make it conform with the enforced rules + content, err := s.raw.Read(ctx, id) + if err != nil { + return err + } + + info, err := s.raw.Stat(ctx, id) if err != nil { return err } - ct := s.raw.ContentType(ctx, key) // TODO: Support various decoding options, e.g. defaulting? 
- return s.serializer.Decoder().DecodeInto(serializer.NewSingleFrameReader(content, ct), obj) + return s.serializer.Decoder().DecodeInto(serializer.NewSingleFrameReader(content, info.ContentType()), obj) } // List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package @@ -147,7 +135,7 @@ func (s *GenericStorage) Get(ctx context.Context, name NamespacedName, obj Objec // If you do specify either an *unstructured.UnstructuredList or *metav1.PartialObjectMetadataList, // you need to populate TypeMeta with the GVK you want back. // TODO: Check if this works with metav1.List{} -func (s *GenericStorage) List(ctx context.Context, list ObjectList, opts ...client.ListOption) error { +func (s *storage) List(ctx context.Context, list core.ObjectList, opts ...client.ListOption) error { // This call will verify that list actually is a List type. gvk, err := serializer.GVKForList(list, s.serializer.Scheme()) if err != nil { @@ -157,12 +145,12 @@ func (s *GenericStorage) List(ctx context.Context, list ObjectList, opts ...clie listOpts := (&ListOptions{}).ApplyOptions(opts) // Do an internal list to get all objects - keys, err := s.raw.List(ctx, gvk) + keys, err := s.raw.List(ctx, gvk.GroupKind(), listOpts.Namespace) if err != nil { return err } - ch := make(chan Object, len(keys)) // TODO: This could be less + ch := make(chan core.Object, len(keys)) // TODO: This could be less wg := &sync.WaitGroup{} wg.Add(1) var processErr error @@ -192,27 +180,29 @@ func (s *GenericStorage) List(ctx context.Context, list ObjectList, opts ...clie return nil } -func (s *GenericStorage) Create(ctx context.Context, obj Object, _ ...client.CreateOption) error { +func (s *storage) Create(ctx context.Context, obj core.Object, _ ...client.CreateOption) error { // We must never save metadata-only structs if serializer.IsPartialObject(obj) { return ErrCannotSaveMetadata } - key, err := s.objectKeyForObj(ctx, obj) + // Get the id of the object + id, err := 
s.idForObj(ctx, obj) if err != nil { return nil } - if s.raw.Exists(ctx, key) { - return ErrAlreadyExists + // Do not create it if it already exists + if s.raw.Exists(ctx, id) { + return core.NewErrAlreadyExists(id) } // The object was not found so we can safely create it - return s.write(ctx, key, obj) + return s.write(ctx, id, obj) } // Note: This should also work for unstructured and partial metadata objects -func (s *GenericStorage) objectKeyForObj(ctx context.Context, obj Object) (ObjectKey, error) { +func (s *storage) idForObj(ctx context.Context, obj core.Object) (core.ObjectID, error) { gvk, err := serializer.GVKForObject(s.serializer.Scheme(), obj) if err != nil { return nil, err @@ -232,7 +222,7 @@ func (s *GenericStorage) objectKeyForObj(ctx context.Context, obj Object) (Objec var namespaces sets.String // If the namespace enforcer requires listing all the other namespaces, // look them up - if s.enforcer.RequireNamespaceExists() { + if s.enforcer.RequireSetNamespaceExists() { nsList := &metav1.PartialObjectMetadataList{} nsList.SetGroupVersionKind(v1GroupKind.WithKind(namespaceListKind)) if err := s.List(ctx, nsList); err != nil { @@ -250,39 +240,36 @@ func (s *GenericStorage) objectKeyForObj(ctx context.Context, obj Object) (Objec // At this point we know name is non-empty, and the namespace field is correct, // according to policy - return NewObjectKey(gvk, NamespacedName{ - Name: obj.GetName(), - Namespace: obj.GetNamespace(), - }), nil + return core.NewObjectID(gvk, core.ObjectKeyFromObject(obj)), nil } -func (s *GenericStorage) Update(ctx context.Context, obj Object, _ ...client.UpdateOption) error { +func (s *storage) Update(ctx context.Context, obj core.Object, _ ...client.UpdateOption) error { // We must never save metadata-only structs if serializer.IsPartialObject(obj) { return ErrCannotSaveMetadata } - key, err := s.objectKeyForObj(ctx, obj) + id, err := s.idForObj(ctx, obj) if err != nil { return nil } - return s.update(ctx, obj, key) + 
return s.update(ctx, obj, id) } -func (s *GenericStorage) update(ctx context.Context, obj Object, key ObjectKey) error { - if !s.raw.Exists(ctx, key) { - return ErrNotFound +func (s *storage) update(ctx context.Context, obj core.Object, id core.ObjectID) error { + if !s.raw.Exists(ctx, id) { + return core.NewErrNotFound(id) } // TODO: Validation? // The object was found so we can safely update it - return s.write(ctx, key, obj) + return s.write(ctx, id, obj) } // Patch performs a strategic merge patch on the object with the given UID, using the byte-encoded patch given -func (s *GenericStorage) Patch(ctx context.Context, obj Object, patch Patch, _ ...client.PatchOption) error { +func (s *storage) Patch(ctx context.Context, obj core.Object, patch core.Patch, _ ...client.PatchOption) error { // We must never save metadata-only structs if serializer.IsPartialObject(obj) { return ErrCannotSaveMetadata @@ -297,13 +284,13 @@ func (s *GenericStorage) Patch(ctx context.Context, obj Object, patch Patch, _ . // Get the object key for obj, this validates GVK, name and namespace // We need to do this before Get to be consistent with Update & Delete - key, err := s.objectKeyForObj(ctx, obj) + id, err := s.idForObj(ctx, obj) if err != nil { return err } // Load the current latest state into obj temporarily, before patching it - if err := s.Get(ctx, key.NamespacedName(), obj); err != nil { + if err := s.Get(ctx, id.ObjectKey(), obj); err != nil { return err } @@ -326,30 +313,30 @@ func (s *GenericStorage) Patch(ctx context.Context, obj Object, patch Patch, _ . // Perform an update internally, similar to what .Update would yield // TODO: Maybe write to storage conditionally? - return s.update(ctx, obj, key) + return s.update(ctx, obj, id) } // Delete removes an Object from the storage // PartialObjectMetadata should work here. 
-func (s *GenericStorage) Delete(ctx context.Context, obj Object, _ ...client.DeleteOption) error { - // Get the key for the object - key, err := s.objectKeyForObj(ctx, obj) +func (s *storage) Delete(ctx context.Context, obj core.Object, _ ...client.DeleteOption) error { + // Get the id for the object + id, err := s.idForObj(ctx, obj) if err != nil { return err } // Verify it did exist - if !s.raw.Exists(ctx, key) { - return ErrNotFound + if !s.raw.Exists(ctx, id) { + return core.NewErrNotFound(id) } // Delete it from the underlying storage - return s.raw.Delete(ctx, key) + return s.raw.Delete(ctx, id) } // DeleteAllOf deletes all matched resources by first doing a List() operation on the given GVK of // obj (obj is not used for anything else) and the given filters in opts. Only the Partial Meta -func (s *GenericStorage) DeleteAllOf(ctx context.Context, obj Object, opts ...client.DeleteAllOfOption) error { +func (s *storage) DeleteAllOf(ctx context.Context, obj core.Object, opts ...client.DeleteAllOfOption) error { // This applies both upstream and custom options, and propagates the options correctly to both // List() and Delete() customDeleteAllOpts := (&DeleteAllOfOptions{}).ApplyOptions(opts) @@ -377,11 +364,11 @@ func (s *GenericStorage) DeleteAllOf(ctx context.Context, obj Object, opts ...cl return nil } -func (s *GenericStorage) write(ctx context.Context, key ObjectKey, obj Object) error { - // Set the content type based on the format given by the RawStorage, but default to JSON - contentType := serializer.ContentTypeJSON - if ct := s.raw.ContentType(ctx, key); len(ct) != 0 { - contentType = ct +func (s *storage) write(ctx context.Context, id core.ObjectID, obj core.Object) error { + // TODO: Figure out how to get ContentType before the object actually exists! 
+ ct, err := s.raw.ContentType(ctx, id) + if err != nil { + return err } // Set creationTimestamp if not already populated @@ -391,62 +378,65 @@ func (s *GenericStorage) write(ctx context.Context, key ObjectKey, obj Object) e } var objBytes bytes.Buffer - err := s.serializer.Encoder().Encode(serializer.NewFrameWriter(contentType, &objBytes), obj) + // TODO: Work with any ContentType, not just JSON/YAML. + err = s.serializer.Encoder().Encode(serializer.NewFrameWriter(ct, &objBytes), obj) if err != nil { return err } - return s.raw.Write(ctx, key, objBytes.Bytes()) + return s.raw.Write(ctx, id, objBytes.Bytes()) } // RawStorage returns the RawStorage instance backing this Storage -func (s *GenericStorage) RawStorage() RawStorage { +func (s *storage) RawStorage() raw.Storage { return s.raw } // Close closes all underlying resources (e.g. goroutines) used; before the application exits -func (s *GenericStorage) Close() error { - return nil // nothing to do here for GenericStorage +func (s *storage) Close() error { + return nil // nothing to do here for storage } // Scheme returns the scheme this client is using. -func (s *GenericStorage) Scheme() *kruntime.Scheme { +func (s *storage) Scheme() *kruntime.Scheme { return s.serializer.Scheme() } // RESTMapper returns the rest this client is using. For now, this returns nil, so don't use. 
-func (s *GenericStorage) RESTMapper() meta.RESTMapper { +func (s *storage) RESTMapper() meta.RESTMapper { return nil } -func createObject(gvk KindKey, scheme *kruntime.Scheme) NewObjectFunc { - return func() (Object, error) { +type newObjectFunc func() (core.Object, error) + +func createObject(gvk core.GroupVersionKind, scheme *kruntime.Scheme) newObjectFunc { + return func() (core.Object, error) { return NewObjectForGVK(gvk, scheme) } } -func createPartialObject(gvk KindKey) NewObjectFunc { - return func() (Object, error) { +func createPartialObject(gvk core.GroupVersionKind) newObjectFunc { + return func() (core.Object, error) { obj := &metav1.PartialObjectMetadata{} obj.SetGroupVersionKind(gvk) return obj, nil } } -func createUnstructuredObject(gvk KindKey) NewObjectFunc { - return func() (Object, error) { +func createUnstructuredObject(gvk core.GroupVersionKind) newObjectFunc { + return func() (core.Object, error) { obj := &unstructured.Unstructured{} obj.SetGroupVersionKind(gvk) return obj, nil } } -func (s *GenericStorage) processKeys(ctx context.Context, keys []ObjectKey, filterOpts *filter.FilterOptions, fn NewObjectFunc, output chan Object) error { +func (s *storage) processKeys(ctx context.Context, keys []core.ObjectKey, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) error { wg := &sync.WaitGroup{} wg.Add(len(keys)) multiErr := &validation.MultiError{} // TODO: Thread-safe append for _, k := range keys { - go func(key ObjectKey) { + go func(key core.ObjectKey) { defer wg.Done() // Create a new object, and decode into it using Get @@ -456,7 +446,7 @@ func (s *GenericStorage) processKeys(ctx context.Context, keys []ObjectKey, filt return } - if err := s.Get(ctx, key.NamespacedName(), obj); err != nil { + if err := s.Get(ctx, key, obj); err != nil { multiErr.Errors = append(multiErr.Errors, err) return } diff --git a/pkg/storage/utils.go b/pkg/storage/utils.go new file mode 100644 index 00000000..dd5396cd --- /dev/null +++ 
b/pkg/storage/utils.go @@ -0,0 +1,23 @@ +package storage + +import ( + "errors" + "fmt" + + "github.com/weaveworks/libgitops/pkg/storage/core" + "k8s.io/apimachinery/pkg/runtime" +) + +var ErrNoMetadata = errors.New("it is required to embed ObjectMeta into the serialized API type") + +func NewObjectForGVK(gvk core.GroupVersionKind, scheme *runtime.Scheme) (core.Object, error) { + kobj, err := scheme.New(gvk) + if err != nil { + return nil, err + } + obj, ok := kobj.(core.Object) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrNoMetadata, gvk) + } + return obj, nil +} From c57092d587bd1860c69182b229fb346759705fa7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 22 Jan 2021 15:03:18 +0200 Subject: [PATCH 046/149] Split out a "backend" part of the storage. --- pkg/storage/backend.go | 301 +++++++++++++++++++++++++++++++++++++++++ pkg/storage/storage.go | 298 ++++++++++------------------------------ 2 files changed, 372 insertions(+), 227 deletions(-) create mode 100644 pkg/storage/backend.go diff --git a/pkg/storage/backend.go b/pkg/storage/backend.go new file mode 100644 index 00000000..da46699c --- /dev/null +++ b/pkg/storage/backend.go @@ -0,0 +1,301 @@ +package storage + +import ( + "bytes" + "context" + + "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/raw" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +// TODO: Make a *core.Unknown that has +// 1. TypeMeta +// 2. DeepCopies (for Object compatibility), +// 3. ObjectMeta +// 4. Spec { Data []byte, ContentType ContentType, Object interface{} } +// 5. Status { Data []byte, ContentType ContentType, Object interface{} } +// TODO: Need to make sure we never write this internal struct to disk (MarshalJSON error?) 
+ +type BackendAccessors interface { + Storage() raw.Storage + NamespaceEnforcer() core.NamespaceEnforcer + Scheme() *runtime.Scheme + Validator() BackendValidator + StorageVersioner() StorageVersioner +} + +type BackendReader interface { + BackendAccessors + + Get(ctx context.Context, obj core.Object) error + raw.Lister +} + +type BackendWriter interface { + BackendAccessors + + Create(ctx context.Context, obj core.Object) error + Update(ctx context.Context, obj core.Object) error + Delete(ctx context.Context, obj core.Object) error +} + +type Backend interface { + BackendReader + BackendWriter +} + +type ChangeOperation string + +const ( + ChangeOperationCreate ChangeOperation = "create" + ChangeOperationUpdate ChangeOperation = "update" + ChangeOperationDelete ChangeOperation = "delete" +) + +type BackendValidator interface { + ValidateChange(ctx context.Context, backend BackendReader, op ChangeOperation, obj core.Object) error +} + +type StorageVersioner interface { + StorageVersion(ctx context.Context, id core.ObjectID) (core.GroupVersion, error) +} + +func NewGenericBackend( + storage raw.Storage, + serializer serializer.Serializer, // TODO: only scheme required, encode/decode optional? + enforcer core.NamespaceEnforcer, + validator BackendValidator, // TODO: optional? + versioner StorageVersioner, // TODO: optional? 
+) (*GenericBackend, error) { + // TODO: validate options + return &GenericBackend{ + scheme: serializer.Scheme(), + encoder: serializer.Encoder(), + decoder: serializer.Decoder(), + + storage: storage, + enforcer: enforcer, + validator: validator, + versioner: versioner, + }, nil +} + +var _ Backend = &GenericBackend{} + +type GenericBackend struct { + scheme *runtime.Scheme + decoder serializer.Decoder + encoder serializer.Encoder + + storage raw.Storage + enforcer core.NamespaceEnforcer + validator BackendValidator + versioner StorageVersioner +} + +func (b *GenericBackend) Scheme() *runtime.Scheme { + return b.scheme +} + +func (b *GenericBackend) Storage() raw.Storage { + return b.storage +} + +func (b *GenericBackend) NamespaceEnforcer() core.NamespaceEnforcer { + return b.enforcer +} + +func (b *GenericBackend) Validator() BackendValidator { + return b.validator +} + +func (b *GenericBackend) StorageVersioner() StorageVersioner { + return b.versioner +} + +func (b *GenericBackend) Get(ctx context.Context, obj core.Object) error { + // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. + id, err := b.idForObj(ctx, obj) + if err != nil { + return err + } + // Read the underlying bytes + content, err := b.storage.Read(ctx, id) + if err != nil { + return err + } + // Get the right content type for the data + ct, err := b.storage.ContentType(ctx, id) + if err != nil { + return err + } + + // TODO: Support various decoding options, e.g. defaulting? + // TODO: Does this "replace" already-set fields? + return b.decoder.DecodeInto(serializer.NewSingleFrameReader(content, ct), obj) +} + +// ListNamespaces lists the available namespaces for the given GroupKind. +// This function shall only be called for namespaced objects, it is up to +// the caller to make sure they do not call this method for root-spaced +// objects; for that the behavior is undefined (but returning an error +// is recommended). 
+func (b *GenericBackend) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) {
+	return b.storage.ListNamespaces(ctx, gk)
+}
+
+// ListObjectIDs returns a list of names (with, optionally, the namespace).
+// For namespaced GroupKinds, the caller must provide a namespace, and for
+// root-spaced GroupKinds, the caller must not. When namespaced, this function
+// must only return object keys for that given namespace.
+func (b *GenericBackend) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) {
+	return b.storage.ListObjectIDs(ctx, gk, namespace)
+}
+
+func (b *GenericBackend) Create(ctx context.Context, obj core.Object) error {
+	// We must never save metadata-only structs
+	if serializer.IsPartialObject(obj) {
+		return ErrCannotSaveMetadata
+	}
+
+	// Get the versioned ID for the given obj. This might mutate obj wrt namespacing info.
+	id, err := b.idForObj(ctx, obj)
+	if err != nil {
+		return err
+	}
+
+	// Do not create it if it already exists
+	if b.storage.Exists(ctx, id) {
+		return core.NewErrAlreadyExists(id)
+	}
+
+	// Validate that the change is ok
+	// TODO: Don't make "upcasting" possible here
+	if err := b.validator.ValidateChange(ctx, b, ChangeOperationCreate, obj); err != nil {
+		return err
+	}
+
+	// Internal, common write shared with Update()
+	return b.write(ctx, id, obj)
+}
+func (b *GenericBackend) Update(ctx context.Context, obj core.Object) error {
+	// We must never save metadata-only structs
+	if serializer.IsPartialObject(obj) {
+		return ErrCannotSaveMetadata
+	}
+
+	// Get the versioned ID for the given obj. This might mutate obj wrt namespacing info.
+ id, err := b.idForObj(ctx, obj) + if err != nil { + return err + } + + // Require that the object already exists + if !b.storage.Exists(ctx, id) { + return core.NewErrNotFound(id) + } + + // Validate that the change is ok + // TODO: Don't make "upcasting" possible here + if err := b.validator.ValidateChange(ctx, b, ChangeOperationUpdate, obj); err != nil { + return err + } + + // Internal, common write shared with Create() + return b.write(ctx, id, obj) +} + +func (b *GenericBackend) write(ctx context.Context, id core.ObjectID, obj core.Object) error { + // TODO: Figure out how to get ContentType before the object actually exists! + ct, err := b.storage.ContentType(ctx, id) + if err != nil { + return err + } + // Get the given storage version + gv, err := b.versioner.StorageVersion(ctx, id) + if err != nil { + return err + } + + // Set creationTimestamp if not already populated + t := obj.GetCreationTimestamp() + if t.IsZero() { + obj.SetCreationTimestamp(metav1.Now()) + } + + var objBytes bytes.Buffer + // TODO: Work with any ContentType, not just JSON/YAML. Or, make a SingleFrameWriter for any ct. + err = b.encoder.EncodeForGroupVersion(serializer.NewFrameWriter(ct, &objBytes), obj, gv) + if err != nil { + return err + } + + return b.storage.Write(ctx, id, objBytes.Bytes()) +} + +func (b *GenericBackend) Delete(ctx context.Context, obj core.Object) error { + // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. 
+ id, err := b.idForObj(ctx, obj) + if err != nil { + return err + } + + // Verify it did exist + if !b.storage.Exists(ctx, id) { + return core.NewErrNotFound(id) + } + + // Validate that the change is ok + // TODO: Don't make "upcasting" possible here + if err := b.validator.ValidateChange(ctx, b, ChangeOperationDelete, obj); err != nil { + return err + } + + // Delete it from the underlying storage + return b.storage.Delete(ctx, id) +} + +// Note: This should also work for unstructured and partial metadata objects +func (b *GenericBackend) idForObj(ctx context.Context, obj core.Object) (core.ObjectID, error) { + gvk, err := serializer.GVKForObject(b.scheme, obj) + if err != nil { + return nil, err + } + + // Object must always have .metadata.name set + if len(obj.GetName()) == 0 { + return nil, ErrNameRequired + } + + // Check if the GroupKind is namespaced + namespaced, err := b.storage.Namespacer().IsNamespaced(gvk.GroupKind()) + if err != nil { + return nil, err + } + + var namespaces sets.String + // If the namespace enforcer requires listing all the other namespaces, + // look them up + if b.enforcer.RequireSetNamespaceExists() { + objIDs, err := b.storage.ListObjectIDs(ctx, v1GroupKind.WithKind("Namespace").GroupKind(), "") + if err != nil { + return nil, err + } + namespaces = sets.NewString() + for _, id := range objIDs { + namespaces.Insert(id.ObjectKey().Name) + } + } + // Enforce the given namespace policy. 
This might mutate obj + if err := b.enforcer.EnforceNamespace(obj, namespaced, namespaces); err != nil { + return nil, err + } + + // At this point we know name is non-empty, and the namespace field is correct, + // according to policy + return core.NewObjectID(gvk, core.ObjectKeyFromObject(obj)), nil +} diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go index e8133fca..9b9bde35 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/storage.go @@ -1,19 +1,15 @@ package storage import ( - "bytes" "context" "errors" "fmt" - "io" "sync" "github.com/fluxcd/go-git-providers/validation" "github.com/weaveworks/libgitops/pkg/filter" - "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" - "github.com/weaveworks/libgitops/pkg/storage/raw" patchutil "github.com/weaveworks/libgitops/pkg/util/patch" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -24,6 +20,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) +// TODO: Rename to Client? Talk objects to the "Storage" part instead? +// TODO: Make it possible to specify the "storage version" manually? +// TODO: Pass an ObjectID that contains all PartialObjectMetadata info for "downstream" consumers +// that can make use of it by "casting up". + var ( // ErrCannotSaveMetadata is returned if the user tries to save metadata-only objects ErrCannotSaveMetadata = errors.New("cannot save (Create|Update|Patch) *metav1.PartialObjectMetadata") @@ -47,9 +48,10 @@ type CommonStorage interface { // RawStorage returns the RawStorage instance backing this Storage // It is expected that RawStorage only operates on one "frame" at a time in its Read/Write operations. - RawStorage() raw.Storage + //RawStorage() raw.Storage // Serializer returns the serializer - Serializer() serializer.Serializer + //Serializer() serializer.Serializer + Backend() Backend // // Misc methods. 
@@ -58,6 +60,7 @@ type CommonStorage interface { // Close closes all underlying resources (e.g. goroutines) used; before the application exits // TODO: Maybe this instead should apply to raw.Storage's now? Close() error + // io.Closer } // ReadStorage TODO @@ -87,45 +90,29 @@ type Storage interface { } // NewGenericStorage constructs a new Storage -func NewGenericStorage(rawStorage raw.Storage, serializer serializer.Serializer, enforcer core.NamespaceEnforcer) Storage { - return &storage{rawStorage, serializer, enforcer} +func NewGenericStorage(backend Backend, patcher serializer.Patcher) Storage { + return &storage{backend, patcher} } // storage implements the Storage interface type storage struct { - raw raw.Storage - serializer serializer.Serializer - enforcer core.NamespaceEnforcer + backend Backend + patcher serializer.Patcher } var _ Storage = &storage{} -func (s *storage) Serializer() serializer.Serializer { - return s.serializer +func (s *storage) Backend() Backend { + return s.backend } // Get returns a new Object for the resource at the specified kind/uid path, based on the file content. // In order to only extract the metadata of this object, pass in a *metav1.PartialObjectMetadata func (s *storage) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { - gvk, err := serializer.GVKForObject(s.serializer.Scheme(), obj) - if err != nil { - return err - } + obj.SetName(key.Name) + obj.SetNamespace(key.Namespace) - id := core.NewObjectID(gvk, key) - // TODO: Sanitize id here: make it conform with the enforced rules - content, err := s.raw.Read(ctx, id) - if err != nil { - return err - } - - info, err := s.raw.Stat(ctx, id) - if err != nil { - return err - } - - // TODO: Support various decoding options, e.g. defaulting? - return s.serializer.Decoder().DecodeInto(serializer.NewSingleFrameReader(content, info.ContentType()), obj) + return s.backend.Get(ctx, obj) } // List lists Objects for the specific kind. 
Optionally, filters can be applied (see the filter package
@@ -135,37 +122,63 @@ func (s *storage) Get(ctx context.Context, key core.ObjectKey, obj core.Object)
 // If you do specify either an *unstructured.UnstructuredList or *metav1.PartialObjectMetadataList,
 // you need to populate TypeMeta with the GVK you want back.
 // TODO: Check if this works with metav1.List{}
+// TODO: Create constructors for the different kinds of lists?
 func (s *storage) List(ctx context.Context, list core.ObjectList, opts ...client.ListOption) error {
 	// This call will verify that list actually is a List type.
-	gvk, err := serializer.GVKForList(list, s.serializer.Scheme())
+	gvk, err := serializer.GVKForList(list, s.backend.Scheme())
 	if err != nil {
 		return err
 	}
 	// This applies both upstream and custom options
 	listOpts := (&ListOptions{}).ApplyOptions(opts)
 
-	// Do an internal list to get all objects
-	keys, err := s.raw.List(ctx, gvk.GroupKind(), listOpts.Namespace)
+	// Get namespacing info
+	gk := gvk.GroupKind()
+	namespaced, err := s.backend.Storage().Namespacer().IsNamespaced(gk)
 	if err != nil {
 		return err
 	}
 
-	ch := make(chan core.Object, len(keys)) // TODO: This could be less
+	// By default, only search the given namespace. It is fully valid for this to be an
+	// empty string: that is the only valid option for root-spaced (non-namespaced) kinds.
+	namespaces := sets.NewString(listOpts.Namespace)
+	// However, if the GroupKind is namespaced, and the given "filter namespace" in list
+	// options is empty, it means that one should list all namespaces
+	if namespaced && listOpts.Namespace == "" {
+		namespaces, err = s.backend.ListNamespaces(ctx, gk)
+		if err != nil {
+			return err
+		}
+	} else if !namespaced && listOpts.Namespace != "" {
+		return errors.New("invalid namespace option: cannot filter namespace for root-spaced object")
+	}
+
+	allIDs := []core.UnversionedObjectID{}
+	for ns := range namespaces {
+		ids, err := s.backend.ListObjectIDs(ctx, gk, ns)
+		if err != nil {
+			return err
+		}
+		allIDs = append(allIDs, ids...)
+ } + + // TODO: Is this a good default? Need to balance mem usage and speed. This is prob. too much + ch := make(chan core.Object, len(allIDs)) wg := &sync.WaitGroup{} wg.Add(1) var processErr error go func() { - createFunc := createObject(gvk, s.serializer.Scheme()) + createFunc := createObject(gvk, s.backend.Scheme()) if serializer.IsPartialObjectList(list) { createFunc = createPartialObject(gvk) } else if serializer.IsUnstructuredList(list) { createFunc = createUnstructuredObject(gvk) } - processErr = s.processKeys(ctx, keys, &listOpts.FilterOptions, createFunc, ch) + processErr = s.processKeys(ctx, allIDs, &listOpts.FilterOptions, createFunc, ch) wg.Done() }() - objs := make([]kruntime.Object, 0, len(keys)) + objs := make([]kruntime.Object, 0, len(allIDs)) for o := range ch { objs = append(objs, o) } @@ -181,96 +194,16 @@ func (s *storage) List(ctx context.Context, list core.ObjectList, opts ...client } func (s *storage) Create(ctx context.Context, obj core.Object, _ ...client.CreateOption) error { - // We must never save metadata-only structs - if serializer.IsPartialObject(obj) { - return ErrCannotSaveMetadata - } - - // Get the id of the object - id, err := s.idForObj(ctx, obj) - if err != nil { - return nil - } - - // Do not create it if it already exists - if s.raw.Exists(ctx, id) { - return core.NewErrAlreadyExists(id) - } - - // The object was not found so we can safely create it - return s.write(ctx, id, obj) -} - -// Note: This should also work for unstructured and partial metadata objects -func (s *storage) idForObj(ctx context.Context, obj core.Object) (core.ObjectID, error) { - gvk, err := serializer.GVKForObject(s.serializer.Scheme(), obj) - if err != nil { - return nil, err - } - - // Object must always have .metadata.name set - if len(obj.GetName()) == 0 { - return nil, ErrNameRequired - } - - // Check if the GroupKind is namespaced - namespaced, err := s.raw.Namespacer().IsNamespaced(gvk.GroupKind()) - if err != nil { - return nil, err - } - - 
var namespaces sets.String - // If the namespace enforcer requires listing all the other namespaces, - // look them up - if s.enforcer.RequireSetNamespaceExists() { - nsList := &metav1.PartialObjectMetadataList{} - nsList.SetGroupVersionKind(v1GroupKind.WithKind(namespaceListKind)) - if err := s.List(ctx, nsList); err != nil { - return nil, err - } - namespaces = sets.NewString() - for _, ns := range nsList.Items { - namespaces.Insert(ns.GetName()) - } - } - // Enforce the given namespace policy. This might mutate obj - if err := s.enforcer.EnforceNamespace(obj, namespaced, namespaces); err != nil { - return nil, err - } - - // At this point we know name is non-empty, and the namespace field is correct, - // according to policy - return core.NewObjectID(gvk, core.ObjectKeyFromObject(obj)), nil + return s.backend.Create(ctx, obj) } func (s *storage) Update(ctx context.Context, obj core.Object, _ ...client.UpdateOption) error { - // We must never save metadata-only structs - if serializer.IsPartialObject(obj) { - return ErrCannotSaveMetadata - } - - id, err := s.idForObj(ctx, obj) - if err != nil { - return nil - } - - return s.update(ctx, obj, id) -} - -func (s *storage) update(ctx context.Context, obj core.Object, id core.ObjectID) error { - if !s.raw.Exists(ctx, id) { - return core.NewErrNotFound(id) - } - - // TODO: Validation? 
- - // The object was found so we can safely update it - return s.write(ctx, id, obj) + return s.backend.Update(ctx, obj) } // Patch performs a strategic merge patch on the object with the given UID, using the byte-encoded patch given func (s *storage) Patch(ctx context.Context, obj core.Object, patch core.Patch, _ ...client.PatchOption) error { - // We must never save metadata-only structs + // Fail-fast: We must never save metadata-only structs if serializer.IsPartialObject(obj) { return ErrCannotSaveMetadata } @@ -282,19 +215,14 @@ func (s *storage) Patch(ctx context.Context, obj core.Object, patch core.Patch, return err } - // Get the object key for obj, this validates GVK, name and namespace - // We need to do this before Get to be consistent with Update & Delete - id, err := s.idForObj(ctx, obj) - if err != nil { - return err - } - // Load the current latest state into obj temporarily, before patching it - if err := s.Get(ctx, id.ObjectKey(), obj); err != nil { + // This also validates the GVK, name and namespace. 
+ if err := s.backend.Get(ctx, obj); err != nil { return err } // Get the right BytePatcher for this patch type + // TODO: Make this return an error bytePatcher := patchutil.BytePatcherForType(patch.Type()) if bytePatcher == nil { return fmt.Errorf("patch type not supported: %s", patch.Type()) @@ -303,35 +231,24 @@ func (s *storage) Patch(ctx context.Context, obj core.Object, patch core.Patch, // Apply the patch into the object using the given byte patcher if unstruct, ok := obj.(kruntime.Unstructured); ok { // TODO: Provide an option for the schema - err = s.serializer.Patcher().ApplyOnUnstructured(bytePatcher, patchJSON, unstruct, nil) + err = s.patcher.ApplyOnUnstructured(bytePatcher, patchJSON, unstruct, nil) } else { - err = s.serializer.Patcher().ApplyOnStruct(bytePatcher, patchJSON, obj) + err = s.patcher.ApplyOnStruct(bytePatcher, patchJSON, obj) } if err != nil { return err } // Perform an update internally, similar to what .Update would yield - // TODO: Maybe write to storage conditionally? - return s.update(ctx, obj, id) + // TODO: Maybe write to storage conditionally? using DryRun all + return s.Update(ctx, obj) + //return s.update(ctx, obj, id) } // Delete removes an Object from the storage // PartialObjectMetadata should work here. 
func (s *storage) Delete(ctx context.Context, obj core.Object, _ ...client.DeleteOption) error { - // Get the id for the object - id, err := s.idForObj(ctx, obj) - if err != nil { - return err - } - - // Verify it did exist - if !s.raw.Exists(ctx, id) { - return core.NewErrNotFound(id) - } - - // Delete it from the underlying storage - return s.raw.Delete(ctx, id) + return s.backend.Delete(ctx, obj) } // DeleteAllOf deletes all matched resources by first doing a List() operation on the given GVK of @@ -342,7 +259,7 @@ func (s *storage) DeleteAllOf(ctx context.Context, obj core.Object, opts ...clie customDeleteAllOpts := (&DeleteAllOfOptions{}).ApplyOptions(opts) // Get the GVK of the object - gvk, err := serializer.GVKForObject(s.serializer.Scheme(), obj) + gvk, err := serializer.GVKForObject(s.backend.Scheme(), obj) if err != nil { return err } @@ -364,34 +281,6 @@ func (s *storage) DeleteAllOf(ctx context.Context, obj core.Object, opts ...clie return nil } -func (s *storage) write(ctx context.Context, id core.ObjectID, obj core.Object) error { - // TODO: Figure out how to get ContentType before the object actually exists! - ct, err := s.raw.ContentType(ctx, id) - if err != nil { - return err - } - - // Set creationTimestamp if not already populated - t := obj.GetCreationTimestamp() - if t.IsZero() { - obj.SetCreationTimestamp(metav1.Now()) - } - - var objBytes bytes.Buffer - // TODO: Work with any ContentType, not just JSON/YAML. - err = s.serializer.Encoder().Encode(serializer.NewFrameWriter(ct, &objBytes), obj) - if err != nil { - return err - } - - return s.raw.Write(ctx, id, objBytes.Bytes()) -} - -// RawStorage returns the RawStorage instance backing this Storage -func (s *storage) RawStorage() raw.Storage { - return s.raw -} - // Close closes all underlying resources (e.g. 
goroutines) used; before the application exits func (s *storage) Close() error { return nil // nothing to do here for storage @@ -399,7 +288,7 @@ func (s *storage) Close() error { // Scheme returns the scheme this client is using. func (s *storage) Scheme() *kruntime.Scheme { - return s.serializer.Scheme() + return s.backend.Scheme() } // RESTMapper returns the rest this client is using. For now, this returns nil, so don't use. @@ -431,12 +320,12 @@ func createUnstructuredObject(gvk core.GroupVersionKind) newObjectFunc { } } -func (s *storage) processKeys(ctx context.Context, keys []core.ObjectKey, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) error { +func (s *storage) processKeys(ctx context.Context, ids []core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) error { wg := &sync.WaitGroup{} - wg.Add(len(keys)) + wg.Add(len(ids)) multiErr := &validation.MultiError{} // TODO: Thread-safe append - for _, k := range keys { - go func(key core.ObjectKey) { + for _, i := range ids { + go func(id core.UnversionedObjectID) { defer wg.Done() // Create a new object, and decode into it using Get @@ -446,7 +335,7 @@ func (s *storage) processKeys(ctx context.Context, keys []core.ObjectKey, filter return } - if err := s.Get(ctx, key, obj); err != nil { + if err := s.Get(ctx, id.ObjectKey(), obj); err != nil { multiErr.Errors = append(multiErr.Errors, err) return } @@ -462,7 +351,7 @@ func (s *storage) processKeys(ctx context.Context, keys []core.ObjectKey, filter } output <- obj - }(k) + }(i) } wg.Wait() // Close the output channel so that the for-range loop stops @@ -474,48 +363,3 @@ func (s *storage) processKeys(ctx context.Context, keys []core.ObjectKey, filter } return nil } - -// DecodePartialObjects reads any set of frames from the given ReadCloser, decodes the frames into -// PartialObjects, validates that the decoded objects are known to the scheme, and optionally sets a default -// group. 
-// TODO: Is this call relevant in the future? -func DecodePartialObjects(rc io.ReadCloser, scheme *kruntime.Scheme, allowMultiple bool, defaultGVK *schema.GroupVersionKind) ([]runtime.PartialObject, error) { - fr := serializer.NewYAMLFrameReader(rc) - - frames, err := serializer.ReadFrameList(fr) - if err != nil { - return nil, err - } - - // If we only allow one frame, signal that early - if !allowMultiple && len(frames) != 1 { - return nil, fmt.Errorf("DecodePartialObjects: unexpected number of frames received from ReadCloser: %d expected 1", len(frames)) - } - - objs := make([]runtime.PartialObject, 0, len(frames)) - for _, frame := range frames { - partobj, err := runtime.NewPartialObject(frame) - if err != nil { - return nil, err - } - - gvk := partobj.GetObjectKind().GroupVersionKind() - - // Don't decode API objects unknown to the scheme (e.g. Kubernetes manifests) - if !scheme.Recognizes(gvk) { - // TODO: Typed error - return nil, fmt.Errorf("unknown GroupVersionKind: %s", partobj.GetObjectKind().GroupVersionKind()) - } - - if defaultGVK != nil { - // Set the desired gvk from the caller of this Object, if defaultGVK is set - // In practice, this means, although we got an external type, - // we might want internal Objects later in the client. Hence, - // set the right expectation here - partobj.GetObjectKind().SetGroupVersionKind(gvk) - } - - objs = append(objs, partobj) - } - return objs, nil -} From 193209cb9c2eab25dd9900c4da7b02983fd534d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sat, 23 Jan 2021 20:48:14 +0200 Subject: [PATCH 047/149] Make a dedicated Lister interface, and also ContentTypeResolver, that is shared between raw.Storage and raw.FileFinder. Make Storage validate namespace info rigorously. Move RootDir into AferoContext, and Filesystem into the FileFinder. 
--- pkg/storage/core/afero.go | 28 ++- pkg/storage/raw/filefinder_mapped.go | 77 ++++---- pkg/storage/raw/filefinder_simple.go | 142 +++++++++------ pkg/storage/raw/interfaces.go | 116 +++++++----- pkg/storage/raw/rawstorage.go | 176 ++++++++----------- pkg/storage/raw/rawstorage_options.go | 33 ---- pkg/storage/raw/watch/inotify/filewatcher.go | 2 +- pkg/storage/raw/watch/manifest/manifest.go | 5 +- pkg/storage/raw/watch/watch.go | 17 +- 9 files changed, 320 insertions(+), 276 deletions(-) delete mode 100644 pkg/storage/raw/rawstorage_options.go diff --git a/pkg/storage/core/afero.go b/pkg/storage/core/afero.go index 68229e1d..2e0a9475 100644 --- a/pkg/storage/core/afero.go +++ b/pkg/storage/core/afero.go @@ -10,6 +10,11 @@ import ( // AferoContext extends afero.Fs and afero.Afero with contexts added to every method. type AferoContext interface { + // RootDirectory specifies where on disk the root directory is stored. + // This path MUST be absolute. All other paths for the other methods + // MUST be relative to this directory. + RootDirectory() string + // Members of afero.Fs // MkdirAll creates a directory path and all parents that does not exist @@ -35,14 +40,27 @@ type AferoContext interface { Walk(ctx context.Context, root string, walkFn filepath.WalkFunc) error } -// AferoWithoutContext wraps an underlying afero.Fs without context knowledge, -// in a AferoContext-compliant implementation. -func AferoWithoutContext(fs afero.Fs) AferoContext { - return &aferoWithoutCtx{fs} +// AferoContextForLocalDir creates a new afero.OsFs for the local directory, wrapped +// in AferoContextWrapperForDir. +func AferoContextForLocalDir(rootDir string) AferoContext { + return AferoContextWrapperForDir(afero.NewOsFs(), rootDir) +} + +// AferoContextWrapperForDir wraps an underlying afero.Fs without context knowledge, +// in a AferoContext-compliant implementation; scoped at the given directory +// (i.e. wrapped in afero.NewBasePathFs(fs, rootDir)). 
+func AferoContextWrapperForDir(fs afero.Fs, rootDir string) AferoContext { + // TODO: rootDir validation? It must be absolute, exist, and be a directory. + return &aferoWithoutCtx{afero.NewBasePathFs(fs, rootDir), rootDir} } type aferoWithoutCtx struct { - fs afero.Fs + fs afero.Fs + rootDir string +} + +func (a *aferoWithoutCtx) RootDirectory() string { + return a.rootDir } func (a *aferoWithoutCtx) MkdirAll(_ context.Context, path string, perm os.FileMode) error { diff --git a/pkg/storage/raw/filefinder_mapped.go b/pkg/storage/raw/filefinder_mapped.go index a1807614..0913c3df 100644 --- a/pkg/storage/raw/filefinder_mapped.go +++ b/pkg/storage/raw/filefinder_mapped.go @@ -3,10 +3,11 @@ package raw import ( "context" "errors" - "fmt" + "github.com/fluxcd/go-git-providers/validation" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" + "k8s.io/apimachinery/pkg/util/sets" ) var ( @@ -20,13 +21,18 @@ var _ MappedFileFinder = &GenericMappedFileFinder{} // NewGenericMappedFileFinder creates a new instance of GenericMappedFileFinder, // that implements the MappedFileFinder interface. The contentTyper is optional, // by default core.DefaultContentTyper will be used. 
-func NewGenericMappedFileFinder(contentTyper core.ContentTyper) MappedFileFinder { +func NewGenericMappedFileFinder(contentTyper core.ContentTyper, fs core.AferoContext) MappedFileFinder { if contentTyper == nil { contentTyper = core.DefaultContentTyper } + if fs == nil { + panic("NewGenericMappedFileFinder: fs is mandatory") + } return &GenericMappedFileFinder{ contentTyper: contentTyper, - branch: &branchImpl{}, + // TODO: Support multiple branches + branch: &branchImpl{}, + fs: fs, } } @@ -42,29 +48,28 @@ func NewGenericMappedFileFinder(contentTyper core.ContentTyper) MappedFileFinder type GenericMappedFileFinder struct { // Default: DefaultContentTyper contentTyper core.ContentTyper + fs core.AferoContext branch branch } +func (f *GenericMappedFileFinder) Filesystem() core.AferoContext { + return f.fs +} + // ObjectPath gets the file path relative to the root directory -func (f *GenericMappedFileFinder) ObjectPath(ctx context.Context, _ core.AferoContext, id core.UnversionedObjectID, namespaced bool) (string, error) { - ns := id.ObjectKey().Namespace - // TODO: can we do this better? - if namespaced && ns == "" { - return "", fmt.Errorf("invalid empty namespace for namespaced object") - } else if !namespaced && ns != "" { - return "", fmt.Errorf("invalid non-empty namespace for non-namespaced object") - } +func (f *GenericMappedFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { cp, ok := f.GetMapping(ctx, id) if !ok { - return "", ErrNotTracked + // TODO: separate interface for "new creates"? + return "", &validation.MultiError{Errors: []error{ErrNotTracked, core.NewErrNotFound(id)}} } return cp.Path, nil } // ObjectAt retrieves the ID containing the virtual path based // on the given physical file path. 
-func (f *GenericMappedFileFinder) ObjectAt(ctx context.Context, _ core.AferoContext, path string) (core.UnversionedObjectID, error) { +func (f *GenericMappedFileFinder) ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) { // TODO: Add reverse tracking too? for gk, gkIter := range f.branch.raw() { for ns, nsIter := range gkIter.raw() { @@ -80,35 +85,47 @@ func (f *GenericMappedFileFinder) ObjectAt(ctx context.Context, _ core.AferoCont return nil, ErrNotTracked } -// ListNamespaces lists the available namespaces for the given GroupKind +// ListNamespaces lists the available namespaces for the given GroupKind. // This function shall only be called for namespaced objects, it is up to // the caller to make sure they do not call this method for root-spaced -// objects; for that the behavior is undefined (but returning an error -// is recommended). -func (f *GenericMappedFileFinder) ListNamespaces(ctx context.Context, _ core.AferoContext, gk core.GroupKind) ([]string, error) { +// objects. If any of the given rules are violated, ErrNamespacedMismatch +// should be returned as a wrapped error. +// +// The implementer can choose between basing the answer strictly on e.g. +// v1.Namespace objects that exist in the system, or just the set of +// different namespaces that have been set on any object belonging to +// the given GroupKind. +func (f *GenericMappedFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { m := f.branch.groupKind(gk).raw() - nsList := make([]string, 0, len(m)) + nsSet := sets.NewString() for ns := range m { - nsList = append(nsList, ns) + nsSet.Insert(ns) } - return nsList, nil + return nsSet, nil } -// ListObjectKeys returns a list of names (with optionally, the namespace). +// ListObjectIDs returns a list of unversioned ObjectIDs. // For namespaced GroupKinds, the caller must provide a namespace, and for // root-spaced GroupKinds, the caller must not. 
When namespaced, this function -// must only return object keys for that given namespace. -func (f *GenericMappedFileFinder) ListObjectKeys(ctx context.Context, _ core.AferoContext, gk core.GroupKind, namespace string) ([]core.ObjectKey, error) { +// must only return object IDs for that given namespace. If any of the given +// rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. +func (f *GenericMappedFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { m := f.branch.groupKind(gk).namespace(namespace).raw() - names := make([]core.ObjectKey, 0, len(m)) + ids := make([]core.UnversionedObjectID, 0, len(m)) for name := range m { - names = append(names, core.ObjectKey{Name: name, Namespace: namespace}) + ids = append(ids, core.NewUnversionedObjectID(gk, core.ObjectKey{Name: name, Namespace: namespace})) } - return names, nil + return ids, nil } -func (f *GenericMappedFileFinder) ContentTypeForPath(ctx context.Context, fs core.AferoContext, path string) (serializer.ContentType, error) { - return f.contentTyper.ContentTypeForPath(ctx, fs, path) +func (f *GenericMappedFileFinder) ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) { + // First, get the path + p, err := f.ObjectPath(ctx, id) + if err != nil { + return "", err + } + // Then, ask the ContentTyper + return f.contentTyper.ContentTypeForPath(ctx, f.fs, p) } // GetMapping retrieves a mapping in the system @@ -128,8 +145,8 @@ func (f *GenericMappedFileFinder) SetMapping(ctx context.Context, id core.Unvers setName(id.ObjectKey().Name, checksumPath) } -// SetMappings replaces all mappings at once -func (f *GenericMappedFileFinder) SetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) { +// ResetMappings replaces all mappings at once +func (f *GenericMappedFileFinder) ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) { f.branch 
= &branchImpl{} for id, cp := range m { f.SetMapping(ctx, id, cp) diff --git a/pkg/storage/raw/filefinder_simple.go b/pkg/storage/raw/filefinder_simple.go index 0a053772..13df0f4b 100644 --- a/pkg/storage/raw/filefinder_simple.go +++ b/pkg/storage/raw/filefinder_simple.go @@ -10,6 +10,7 @@ import ( "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" + "k8s.io/apimachinery/pkg/util/sets" ) // NewSimpleStorage is a default opinionated constructor for a FilesystemStorage @@ -17,16 +18,33 @@ import ( // If you need more advanced customizablility than provided here, you can compose // the call to NewGenericFilesystemStorage yourself. func NewSimpleStorage(dir string, ct serializer.ContentType, namespacer core.Namespacer) (FilesystemStorage, error) { - fileFinder := &SimpleFileFinder{ + fs := core.AferoContextForLocalDir(dir) + fileFinder, err := NewSimpleFileFinder(fs, SimpleFileFinderOptions{ // ContentType is optional; JSON is used by default ContentType: ct, + }) + if err != nil { + return nil, err + } + // fileFinder and namespacer are validated by NewGenericFilesystemStorage. + return NewGenericFilesystemStorage(fileFinder, namespacer) +} + +func NewSimpleFileFinder(fs core.AferoContext, opts SimpleFileFinderOptions) (*SimpleFileFinder, error) { + if fs == nil { + return nil, fmt.Errorf("NewSimpleFileFinder: fs is mandatory") } - // dir and namespacer are validated by NewGenericFilesystemStorage. - return NewGenericFilesystemStorage(dir, fileFinder, namespacer) + return &SimpleFileFinder{fs: fs, opts: opts}, nil +} + +// isObjectIDNamespaced returns true if the ID is of a namespaced GroupKind, and +// false if the GroupKind is non-namespaced. NOTE: This ONLY works for FileFinders +// where the Storage has made sure that the namespacing conventions are followed. 
+func isObjectIDNamespaced(id core.UnversionedObjectID) bool { + return id.ObjectKey().Namespace != "" } var _ FileFinder = &SimpleFileFinder{} -var _ core.ContentTyper = &SimpleFileFinder{} // SimpleFileFinder is a FileFinder-compliant implementation that // stores Objects on disk using a straightforward directory layout. @@ -53,6 +71,11 @@ var _ core.ContentTyper = &SimpleFileFinder{} // // This FileFinder does not support the ObjectAt method. type SimpleFileFinder struct { + fs core.AferoContext + opts SimpleFileFinderOptions +} + +type SimpleFileFinderOptions struct { // Default: false; means enable group directory DisableGroupDirectory bool // Default: ""; means use file names as the means of storage @@ -63,11 +86,18 @@ type SimpleFileFinder struct { FileExtensionResolver core.FileExtensionResolver } +// TODO: Use group name "core" if group is "" to support core k8s objects. + +func (f *SimpleFileFinder) Filesystem() core.AferoContext { + return f.fs +} + // ObjectPath gets the file path relative to the root directory -func (f *SimpleFileFinder) ObjectPath(ctx context.Context, fs core.AferoContext, id core.UnversionedObjectID, namespaced bool) (string, error) { +func (f *SimpleFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { // // paths := []string{f.kindKeyPath(id.GroupKind())} - if namespaced { + + if isObjectIDNamespaced(id) { // .// paths = append(paths, id.ObjectKey().Namespace) } @@ -76,18 +106,18 @@ func (f *SimpleFileFinder) ObjectPath(ctx context.Context, fs core.AferoContext, if err != nil { return "", err } - if f.SubDirectoryFileName == "" { + if f.opts.SubDirectoryFileName == "" { // ./. paths = append(paths, id.ObjectKey().Name+ext) } else { // .//. 
- paths = append(paths, id.ObjectKey().Name, f.SubDirectoryFileName+ext) + paths = append(paths, id.ObjectKey().Name, f.opts.SubDirectoryFileName+ext) } return filepath.Join(paths...), nil } func (f *SimpleFileFinder) kindKeyPath(gk core.GroupKind) string { - if f.DisableGroupDirectory { + if f.opts.DisableGroupDirectory { // .// return filepath.Join(gk.Kind) } @@ -97,27 +127,62 @@ func (f *SimpleFileFinder) kindKeyPath(gk core.GroupKind) string { // ObjectAt retrieves the ID containing the virtual path based // on the given physical file path. -func (f *SimpleFileFinder) ObjectAt(ctx context.Context, fs core.AferoContext, path string) (core.UnversionedObjectID, error) { +func (f *SimpleFileFinder) ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) { return nil, errors.New("not implemented") } -// ListNamespaces lists the available namespaces for the given GroupKind +// ContentType always returns f.ContentType, or ContentTypeJSON as a fallback if +// f.ContentType was not set. +func (f *SimpleFileFinder) ContentType(ctx context.Context, _ core.UnversionedObjectID) (serializer.ContentType, error) { + return f.contentType(), nil +} + +func (f *SimpleFileFinder) ext() (string, error) { + resolver := f.opts.FileExtensionResolver + if resolver == nil { + resolver = core.DefaultFileExtensionResolver + } + ext, err := resolver.ExtensionForContentType(f.contentType()) + if err != nil { + return "", err + } + return ext, nil +} + +func (f *SimpleFileFinder) contentType() serializer.ContentType { + if len(f.opts.ContentType) != 0 { + return f.opts.ContentType + } + return serializer.ContentTypeJSON +} + +// ListNamespaces lists the available namespaces for the given GroupKind. // This function shall only be called for namespaced objects, it is up to // the caller to make sure they do not call this method for root-spaced -// objects; for that the behavior is undefined (but returning an error -// is recommended). 
-func (f *SimpleFileFinder) ListNamespaces(ctx context.Context, fs core.AferoContext, gk core.GroupKind) ([]string, error) { - return readDir(ctx, fs, f.kindKeyPath(gk)) +// objects. If any of the given rules are violated, ErrNamespacedMismatch +// should be returned as a wrapped error. +// +// The implementer can choose between basing the answer strictly on e.g. +// v1.Namespace objects that exist in the system, or just the set of +// different namespaces that have been set on any object belonging to +// the given GroupKind. +func (f *SimpleFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { + entries, err := readDir(ctx, f.fs, f.kindKeyPath(gk)) + if err != nil { + return nil, err + } + return sets.NewString(entries...), nil } -// ListObjectKeys returns a list of names (with optionally, the namespace). +// ListObjectIDs returns a list of unversioned ObjectIDs. // For namespaced GroupKinds, the caller must provide a namespace, and for // root-spaced GroupKinds, the caller must not. When namespaced, this function -// must only return object keys for that given namespace. -func (f *SimpleFileFinder) ListObjectKeys(ctx context.Context, fs core.AferoContext, gk core.GroupKind, namespace string) ([]core.ObjectKey, error) { +// must only return object IDs for that given namespace. If any of the given +// rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. 
+func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { // If namespace is empty, the names will be in ./, otherwise .// namesDir := filepath.Join(f.kindKeyPath(gk), namespace) - entries, err := readDir(ctx, fs, namesDir) + entries, err := readDir(ctx, f.fs, namesDir) if err != nil { return nil, err } @@ -126,15 +191,15 @@ func (f *SimpleFileFinder) ListObjectKeys(ctx context.Context, fs core.AferoCont if err != nil { return nil, err } - // Map the names to ObjectKeys - keys := make([]core.ObjectKey, 0, len(entries)) + // Map the names to UnversionedObjectIDs + ids := make([]core.UnversionedObjectID, 0, len(entries)) for _, entry := range entries { // Loop through all entries, and make sure they are sanitized .metadata.name's - if f.SubDirectoryFileName != "" { + if f.opts.SubDirectoryFileName != "" { // If f.SubDirectoryFileName != "", the file names already match .metadata.name // Make sure the metadata file ./<.metadata.name>/. actually exists - expectedPath := filepath.Join(namesDir, entry, f.SubDirectoryFileName+ext) - if exists, _ := fs.Exists(ctx, expectedPath); !exists { + expectedPath := filepath.Join(namesDir, entry, f.opts.SubDirectoryFileName+ext) + if exists, _ := f.fs.Exists(ctx, expectedPath); !exists { continue } } else { @@ -147,34 +212,9 @@ func (f *SimpleFileFinder) ListObjectKeys(ctx context.Context, fs core.AferoCont entry = strings.TrimSuffix(entry, ext) } // If we got this far, add the key to the list - keys = append(keys, core.ObjectKey{Name: entry, Namespace: namespace}) - } - return keys, nil -} - -// ContentTypeForPath always returns f.ContentType, or ContentTypeJSON as a fallback if -// f.ContentType was not set. 
-func (f *SimpleFileFinder) ContentTypeForPath(ctx context.Context, _ core.AferoContext, path string) (serializer.ContentType, error) { - return f.contentType(), nil -} - -func (f *SimpleFileFinder) ext() (string, error) { - resolver := f.FileExtensionResolver - if resolver == nil { - resolver = core.DefaultFileExtensionResolver - } - ext, err := resolver.ExtensionForContentType(f.contentType()) - if err != nil { - return "", err + ids = append(ids, core.NewUnversionedObjectID(gk, core.ObjectKey{Name: entry, Namespace: namespace})) } - return ext, nil -} - -func (f *SimpleFileFinder) contentType() serializer.ContentType { - if len(f.ContentType) != 0 { - return f.ContentType - } - return serializer.ContentTypeJSON + return ids, nil } func readDir(ctx context.Context, fs core.AferoContext, dir string) ([]string, error) { diff --git a/pkg/storage/raw/interfaces.go b/pkg/storage/raw/interfaces.go index 62f9d2c5..f49847d4 100644 --- a/pkg/storage/raw/interfaces.go +++ b/pkg/storage/raw/interfaces.go @@ -2,9 +2,17 @@ package raw import ( "context" + "errors" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" + "k8s.io/apimachinery/pkg/util/sets" +) + +var ( + // ErrNamespacedMismatch is returned by Storage methods if the given UnversionedObjectID + // carries invalid data, according to the Namespacer. + ErrNamespacedMismatch = errors.New("mismatch between namespacing info for object and the given parameter") ) // Storage is a Key-indexed low-level interface to @@ -23,20 +31,18 @@ type Storage interface { Writer } -// Accessors allows access to lower-level interfaces needed by Storage. -type Accessors interface { +// StorageCommon is an interface that contains the resources both needed +// by Reader and Writer. +type StorageCommon interface { // Namespacer gives access to the namespacer that is used Namespacer() core.Namespacer - // Filesystem gets the underlying filesystem abstraction, if - // applicable. 
- Filesystem() core.AferoContext + // Exists checks if the resource indicated by the ID exists. + Exists(ctx context.Context, id core.UnversionedObjectID) bool } // Reader provides the read operations for the Storage. type Reader interface { - Accessors - - // Read operations + StorageCommon // Read returns a resource's content based on the ID. // If the resource does not exist, it returns core.NewErrNotFound. @@ -45,23 +51,40 @@ type Reader interface { // content type, and possibly, path on disk (in the case of // FilesystemStorage), or core.NewErrNotFound if not found Stat(ctx context.Context, id core.UnversionedObjectID) (ObjectInfo, error) - // Exists checks if the resource indicated by the ID exists. It is - // a shorthand for running Stat() and checking that error was nil. - Exists(ctx context.Context, id core.UnversionedObjectID) bool - // Checksum returns the ContentType. This operation must function - // also before the Object with the given id exists in the system, - // in order to support creating new Objects. - ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) + // Resolve ContentType + ContentTypeResolver // List operations + Lister +} - // List returns all matching object keys based on the given KindKey. - // If the GroupKind is namespaced (according to the Namespacer), and - // namespace is empty: all namespaces are searched. If namespace in - // that case is set; only that namespace is searched. If the GroupKind - // is non-namespaced, and namespace is non-empty, an error is returned. - // TODO: Make this return []core.UnversionedObjectID instead? - List(ctx context.Context, gk core.GroupKind, namespace string) ([]core.ObjectKey, error) +type ContentTypeResolver interface { + // ContentType returns the content type that should be used when serializing + // the object with the given ID. 
This operation must function also before the + // Object with the given id exists in the system, in order to be able to + // create new Objects. + ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) +} + +type Lister interface { + // ListNamespaces lists the available namespaces for the given GroupKind. + // This function shall only be called for namespaced objects, it is up to + // the caller to make sure they do not call this method for root-spaced + // objects. If any of the given rules are violated, ErrNamespacedMismatch + // should be returned as a wrapped error. + // + // The implementer can choose between basing the answer strictly on e.g. + // v1.Namespace objects that exist in the system, or just the set of + // different namespaces that have been set on any object belonging to + // the given GroupKind. + ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) + + // ListObjectIDs returns a list of unversioned ObjectIDs. + // For namespaced GroupKinds, the caller must provide a namespace, and for + // root-spaced GroupKinds, the caller must not. When namespaced, this function + // must only return object IDs for that given namespace. If any of the given + // rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. + ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) } // ObjectInfo is the return value from Storage.Stat(). It provides the @@ -99,9 +122,7 @@ type ChecksumContainer interface { // Reader provides the write operations for the Storage. type Writer interface { - Accessors - - // Write operations + StorageCommon // Write writes the given content to the resource indicated by the ID. // Error returns are implementation-specific. @@ -117,38 +138,36 @@ type Writer interface { type FilesystemStorage interface { Storage - // RootDirectory returns the root directory of this FilesystemStorage. 
- RootDirectory() string // FileFinder returns the underlying FileFinder used. + // TODO: Maybe one Storage can have multiple FileFinders? FileFinder() FileFinder } // FileFinder is a generic implementation for locating files on disk, to be // used by a FilesystemStorage. +// +// Important: The caller MUST guarantee that the implementation can figure +// out if the GroupKind is namespaced or not by the following check: +// +// namespaced := id.ObjectKey().Namespace != "" +// +// In other words, the caller must enforce a namespace being set for namespaced +// kinds, and namespace not being set for non-namespaced kinds. type FileFinder interface { - // FileFinder must be able to provide a ContentType for a path, although - // that path might not exist (i.e. in a create operation). - core.ContentTyper + // Filesystem gets the underlying filesystem abstraction, if + // applicable. + Filesystem() core.AferoContext // ObjectPath gets the file path relative to the root directory. // In order to support a create operation, this function must also return a valid path for // files that do not yet exist on disk. - ObjectPath(ctx context.Context, fs core.AferoContext, id core.UnversionedObjectID, namespaced bool) (string, error) + ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) // ObjectAt retrieves the ID based on the given relative file path to fs. - ObjectAt(ctx context.Context, fs core.AferoContext, path string) (core.UnversionedObjectID, error) - - // ListNamespaces lists the available namespaces for the given GroupKind - // This function shall only be called for namespaced objects, it is up to - // the caller to make sure they do not call this method for root-spaced - // objects; for that the behavior is undefined (but returning an error - // is recommended). - ListNamespaces(ctx context.Context, fs core.AferoContext, gk core.GroupKind) ([]string, error) - // ListObjectKeys returns a list of names (with optionally, the namespace). 
- // For namespaced GroupKinds, the caller must provide a namespace, and for - // root-spaced GroupKinds, the caller must not. When namespaced, this function - // must only return object keys for that given namespace. - // TODO: Make this return []core.UnversionedObjectID instead? - ListObjectKeys(ctx context.Context, fs core.AferoContext, gk core.GroupKind, namespace string) ([]core.ObjectKey, error) + ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) + // The FileFinder should be able to resolve the content type for various IDs + ContentTypeResolver + // The FileFinder should be able to list namespaces and Object IDs + Lister } // MappedFileFinder is an extension to FileFinder that allows it to have an internal @@ -166,8 +185,8 @@ type MappedFileFinder interface { // SetMapping binds an ID to a physical file path. This operation overwrites // any previous mapping for id. SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) - // SetMappings replaces all mappings at once to the ones in m. - SetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) + // ResetMappings replaces all mappings at once to the ones in m. + ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) // DeleteMapping removes the mapping for the given id. DeleteMapping(ctx context.Context, id core.UnversionedObjectID) } @@ -188,6 +207,9 @@ type UnstructuredStorage interface { // ObjectRecognizer returns the underlying ObjectRecognizer used. ObjectRecognizer() core.ObjectRecognizer + // PathExcluder specifies what paths to not sync + // TODO: enable this + // PathExcluder() core.PathExcluder // MappedFileFinder returns the underlying MappedFileFinder used. 
MappedFileFinder() MappedFileFinder } diff --git a/pkg/storage/raw/rawstorage.go b/pkg/storage/raw/rawstorage.go index 70a0c192..fe2392f9 100644 --- a/pkg/storage/raw/rawstorage.go +++ b/pkg/storage/raw/rawstorage.go @@ -2,47 +2,29 @@ package raw import ( "context" - "errors" "fmt" "os" "path/filepath" "strconv" - "github.com/spf13/afero" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" + "k8s.io/apimachinery/pkg/util/sets" ) // NewGenericFilesystemStorage creates a new GenericFilesystemStorage using the given lower-level -// interface implementations. dir, fileFinder and namespacer are required and must hence be non-nil. -// If AferoContext in the options is set, it must have its root directory set (using NewBasePathFs) -// exactly to dir. -func NewGenericFilesystemStorage(dir string, fileFinder FileFinder, namespacer core.Namespacer, opts ...GenericFilesystemStorageOption) (FilesystemStorage, error) { - if len(dir) == 0 { - return nil, fmt.Errorf("NewGenericFilesystemStorage: dir is mandatory") - } +// FileFinder and Namespacer. +func NewGenericFilesystemStorage(fileFinder FileFinder, namespacer core.Namespacer) (FilesystemStorage, error) { if fileFinder == nil { return nil, fmt.Errorf("NewGenericFilesystemStorage: fileFinder is mandatory") } if namespacer == nil { return nil, fmt.Errorf("NewGenericFilesystemStorage: namespacer is mandatory") } - // Parse the options - o := (&GenericFilesystemStorageOptions{}).ApplyOptions(opts) - if o.AferoContext == nil { - // Default to ignoring the context parameter, only seeing things relative - // to dir, and operating on the local disk. - - // TODO: Make a helper for this, and possibly also a RootDirectory() string - // method to AferoContext, to make it easier to detect if that exists. 
- o.AferoContext = core.AferoWithoutContext(afero.NewBasePathFs(afero.NewOsFs(), dir)) - } // else validate that the given AferoContext has root dir set to dir return &GenericFilesystemStorage{ - dir: dir, fileFinder: fileFinder, namespacer: namespacer, - fs: o.AferoContext, }, nil } @@ -54,61 +36,55 @@ func NewGenericFilesystemStorage(dir string, fileFinder FileFinder, namespacer c // alternatively, from info.Sys() returned from AferoContext.Stat(), if it can // be cast to a ChecksumContainer. type GenericFilesystemStorage struct { - dir string fileFinder FileFinder namespacer core.Namespacer - fs core.AferoContext } func (r *GenericFilesystemStorage) Namespacer() core.Namespacer { return r.namespacer } -func (r *GenericFilesystemStorage) Filesystem() core.AferoContext { - return r.fs -} - func (r *GenericFilesystemStorage) FileFinder() FileFinder { return r.fileFinder } -func (r *GenericFilesystemStorage) RootDirectory() string { - return r.dir -} - func (r *GenericFilesystemStorage) Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) { - // Check if the resource indicated by key exists - if !r.Exists(ctx, id) { - return nil, core.NewErrNotFound(id) - } - // Get the path + // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { return nil, err } + // Check if the resource indicated by key exists + if !r.exists(ctx, p) { + return nil, core.NewErrNotFound(id) + } // Read the file - return r.fs.ReadFile(ctx, p) + return r.FileFinder().Filesystem().ReadFile(ctx, p) } func (r *GenericFilesystemStorage) Exists(ctx context.Context, id core.UnversionedObjectID) bool { - // Get the path + // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { return false } - exists, _ := r.fs.Exists(ctx, p) + return r.exists(ctx, p) +} + +func (r *GenericFilesystemStorage) exists(ctx context.Context, path string) bool { + exists, _ := r.FileFinder().Filesystem().Exists(ctx, path) return exists } func (r 
*GenericFilesystemStorage) Stat(ctx context.Context, id core.UnversionedObjectID) (ObjectInfo, error) { - // Get the path + // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { return nil, err } // Stat the file - info, err := r.fs.Stat(ctx, p) + info, err := r.FileFinder().Filesystem().Stat(ctx, p) if os.IsNotExist(err) { return nil, core.NewErrNotFound(id) } else if err != nil { @@ -123,7 +99,7 @@ func (r *GenericFilesystemStorage) Stat(ctx context.Context, id core.Unversioned } // Get content type - contentType, err := r.contentType(ctx, p) + contentType, err := r.ContentType(ctx, id) if err != nil { return nil, err } @@ -137,103 +113,107 @@ func (r *GenericFilesystemStorage) Stat(ctx context.Context, id core.Unversioned } func (r *GenericFilesystemStorage) ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) { - // Get the path - p, err := r.getPath(ctx, id) - if err != nil { - return serializer.ContentType(""), err + // Verify namespacing info + if err := r.verifyID(id); err != nil { + return "", err } - // Resolve the content type for the path - return r.contentType(ctx, p) -} -func (r *GenericFilesystemStorage) contentType(ctx context.Context, p string) (serializer.ContentType, error) { - return r.fileFinder.ContentTypeForPath(ctx, r.fs, p) + return r.FileFinder().ContentType(ctx, id) } func (r *GenericFilesystemStorage) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error { - // Get the path + // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { return err } + // Create the underlying directories if they do not exist already - if !r.Exists(ctx, id) { - if err := r.fs.MkdirAll(ctx, filepath.Dir(p), 0755); err != nil { + if !r.exists(ctx, p) { + if err := r.FileFinder().Filesystem().MkdirAll(ctx, filepath.Dir(p), 0755); err != nil { return err } } // Write the file content - return r.fs.WriteFile(ctx, p, content, 0664) + 
return r.FileFinder().Filesystem().WriteFile(ctx, p, content, 0664) } func (r *GenericFilesystemStorage) Delete(ctx context.Context, id core.UnversionedObjectID) error { - // Check if the resource indicated by key exists - if !r.Exists(ctx, id) { - return core.NewErrNotFound(id) - } - // Get the path + // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { return err } + + // Check if the resource indicated by key exists + if !r.exists(ctx, p) { + return core.NewErrNotFound(id) + } // Remove the file - return r.fs.Remove(ctx, p) + return r.FileFinder().Filesystem().Remove(ctx, p) } -func (r *GenericFilesystemStorage) List(ctx context.Context, gk core.GroupKind, filterNs string) ([]core.ObjectKey, error) { - // Get namespacing info - namespaced, err := r.isNamespaced(gk) +// ListNamespaces lists the available namespaces for the given GroupKind. +// This function shall only be called for namespaced objects, it is up to +// the caller to make sure they do not call this method for root-spaced +// objects; for that the behavior is undefined (but returning an error +// is recommended). 
+func (r *GenericFilesystemStorage) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { + namespaced, err := r.namespacer.IsNamespaced(gk) if err != nil { return nil, err } - + // Validate the groupkind if !namespaced { - // Make sure we don't have invalid input - if len(filterNs) != 0 { - return nil, errors.New("must not specify namespace filter for non-namespaced resource") - } - // Return the non-namespaced ObjectKeys from the FileFinder - return r.fileFinder.ListObjectKeys(ctx, r.fs, gk, "") - } - - // If filterNs is given, only search the given namespace - var namespaces []string - if len(filterNs) != 0 { - namespaces = []string{filterNs} - } else { - // Otherwise, list and loop all namespaces available for this GroupKind - namespaces, err = r.fileFinder.ListNamespaces(ctx, r.fs, gk) - if err != nil { - return nil, err - } + return nil, fmt.Errorf("%w: cannot list namespaces for non-namespaced kind: %v", ErrNamespacedMismatch, gk) } + // Just use the underlying filefinder + return r.FileFinder().ListNamespaces(ctx, gk) +} - // List keys for each namespace, and add to the keys slice - keys := []core.ObjectKey{} - for _, namespace := range namespaces { - newKeys, err := r.fileFinder.ListObjectKeys(ctx, r.fs, gk, namespace) - if err != nil { - return nil, err - } - keys = append(keys, newKeys...) +// ListObjectIDs returns a list of unversioned ObjectIDs. +// For namespaced GroupKinds, the caller must provide a namespace, and for +// root-spaced GroupKinds, the caller must not. When namespaced, this function +// must only return object IDs for that given namespace. 
+func (r *GenericFilesystemStorage) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { + // Validate the namespace parameter + if err := VerifyNamespaced(r.Namespacer(), gk, namespace); err != nil { + return nil, err } - return keys, nil + // Just use the underlying filefinder + return r.FileFinder().ListObjectIDs(ctx, gk, namespace) } func (r *GenericFilesystemStorage) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { - // Get namespacing info - namespaced, err := r.isNamespaced(id.GroupKind()) - if err != nil { + // Verify namespacing info + if err := r.verifyID(id); err != nil { return "", err } // Get the path - return r.fileFinder.ObjectPath(ctx, r.fs, id, namespaced) + return r.FileFinder().ObjectPath(ctx, id) } -func (r *GenericFilesystemStorage) isNamespaced(gk core.GroupKind) (bool, error) { - return r.namespacer.IsNamespaced(gk) +func (r *GenericFilesystemStorage) verifyID(id core.UnversionedObjectID) error { + return VerifyNamespaced(r.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace) } +// TODO: Move to the Filesystem abstraction func checksumFromFileInfo(fi os.FileInfo) string { return strconv.FormatInt(fi.ModTime().UnixNano(), 10) } + +// VerifyNamespaced verifies that the given GroupKind and namespace parameter follows +// the rule of the Namespacer. 
+func VerifyNamespaced(namespacer core.Namespacer, gk core.GroupKind, ns string) error { + // Get namespacing info + namespaced, err := namespacer.IsNamespaced(gk) + if err != nil { + return err + } + if namespaced && ns == "" { + return fmt.Errorf("%w: namespaced kind %v requires non-empty namespace", ErrNamespacedMismatch, gk) + } else if !namespaced && ns != "" { + return fmt.Errorf("%w: non-namespaced kind %v must not have namespace parameter set", ErrNamespacedMismatch, gk) + } + return nil +} diff --git a/pkg/storage/raw/rawstorage_options.go b/pkg/storage/raw/rawstorage_options.go deleted file mode 100644 index 7cb59278..00000000 --- a/pkg/storage/raw/rawstorage_options.go +++ /dev/null @@ -1,33 +0,0 @@ -package raw - -import ( - "github.com/weaveworks/libgitops/pkg/storage/core" -) - -type GenericFilesystemStorageOption interface { - ApplyToGenericFilesystemStorage(*GenericFilesystemStorageOptions) -} - -var _ GenericFilesystemStorageOption = &GenericFilesystemStorageOptions{} - -// GenericFilesystemStorageOptions specifies optional options for -// NewGenericFilesystemStorage. -type GenericFilesystemStorageOptions struct { - // AferoContext specifies a filesystem abstraction implementation. - // Default: An implementation scoped under the given root directory, - // operating on the local disk. 
- AferoContext core.AferoContext -} - -func (o *GenericFilesystemStorageOptions) ApplyToGenericFilesystemStorage(target *GenericFilesystemStorageOptions) { - if o.AferoContext != nil { - target.AferoContext = o.AferoContext - } -} - -func (o *GenericFilesystemStorageOptions) ApplyOptions(opts []GenericFilesystemStorageOption) *GenericFilesystemStorageOptions { - for _, opt := range opts { - opt.ApplyToGenericFilesystemStorage(o) - } - return o -} diff --git a/pkg/storage/raw/watch/inotify/filewatcher.go b/pkg/storage/raw/watch/inotify/filewatcher.go index 0056703d..4524c8d2 100644 --- a/pkg/storage/raw/watch/inotify/filewatcher.go +++ b/pkg/storage/raw/watch/inotify/filewatcher.go @@ -61,7 +61,7 @@ func NewFileWatcher(dir string, opts ...FileWatcherOption) (watch.FileEventsEmit opts: *o, // afero operates on the local disk, but is by convention scoped to the local // directory that is being watched - afero: core.AferoWithoutContext(afero.NewBasePathFs(afero.NewOsFs(), dir)), + afero: core.AferoContextWrapperForDir(afero.NewOsFs(), dir), batcher: sync.NewBatchWriter(o.BatchTimeout), } diff --git a/pkg/storage/raw/watch/manifest/manifest.go b/pkg/storage/raw/watch/manifest/manifest.go index b9728c78..793e453c 100644 --- a/pkg/storage/raw/watch/manifest/manifest.go +++ b/pkg/storage/raw/watch/manifest/manifest.go @@ -17,8 +17,9 @@ func NewManifestStorage( recognizer core.ObjectRecognizer, pathExcluder core.PathExcluder, ) (watch.UnstructuredEventStorage, error) { - fileFinder := raw.NewGenericMappedFileFinder(contentTyper) - fsRaw, err := raw.NewGenericFilesystemStorage(dir, fileFinder, namespacer) + fs := core.AferoContextForLocalDir(dir) + fileFinder := raw.NewGenericMappedFileFinder(contentTyper, fs) + fsRaw, err := raw.NewGenericFilesystemStorage(fileFinder, namespacer) if err != nil { return nil, err } diff --git a/pkg/storage/raw/watch/watch.go b/pkg/storage/raw/watch/watch.go index a4ce3bd8..0c307919 100644 --- a/pkg/storage/raw/watch/watch.go +++ 
b/pkg/storage/raw/watch/watch.go @@ -136,7 +136,7 @@ func (s *GenericUnstructuredEventStorage) Sync(ctx context.Context) error { // TODO: Alternatively/also, we should support feeding an // UnstructuredStorage, so that we can run its Sync() function instead - content, err := s.Filesystem().ReadFile(ctx, file) + content, err := s.FileFinder().Filesystem().ReadFile(ctx, file) if err != nil { logrus.Warnf("Ignoring %q: %v", file, err) continue @@ -160,7 +160,7 @@ func (s *GenericUnstructuredEventStorage) Sync(ctx context.Context) error { // Write writes the given content to the resource indicated by the ID. // Error returns are implementation-specific. func (s *GenericUnstructuredEventStorage) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error { - // Get the path + // Get the path and verify namespacing info p, err := s.getPath(ctx, id) if err != nil { return err @@ -174,7 +174,7 @@ func (s *GenericUnstructuredEventStorage) Write(ctx context.Context, id core.Unv // Delete deletes the resource indicated by the ID. // If the resource does not exist, it returns ErrNotFound. 
func (s *GenericUnstructuredEventStorage) Delete(ctx context.Context, id core.UnversionedObjectID) error { - // Get the path + // Get the path and verify namespacing info p, err := s.getPath(ctx, id) if err != nil { return err @@ -186,13 +186,12 @@ func (s *GenericUnstructuredEventStorage) Delete(ctx context.Context, id core.Un } func (s *GenericUnstructuredEventStorage) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { - // Get namespacing info - namespaced, err := s.Namespacer().IsNamespaced(id.GroupKind()) - if err != nil { + // Verify namespacing info + if err := raw.VerifyNamespaced(s.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace); err != nil { return "", err } // Get the path - return s.FileFinder().ObjectPath(ctx, s.Filesystem(), id, namespaced) + return s.FileFinder().ObjectPath(ctx, id) } func (s *GenericUnstructuredEventStorage) Close() error { @@ -239,7 +238,7 @@ func (s *GenericUnstructuredEventStorage) handleDelete(ctx context.Context, even // the known objects in such a way that it is able to do the reverse-lookup. For // mapped FileFinders, by this point the path should still be in the local cache, // which should make us able to get the ID before deleted from the cache. 
- objectID, err := s.fileFinder.ObjectAt(ctx, s.Filesystem(), event.Path) + objectID, err := s.fileFinder.ObjectAt(ctx, event.Path) if err != nil { return fmt.Errorf("failed to reverse lookup ID for deleted file %q: %v", event.Path, err) } @@ -253,7 +252,7 @@ func (s *GenericUnstructuredEventStorage) handleDelete(ctx context.Context, even func (s *GenericUnstructuredEventStorage) handleModifyMove(ctx context.Context, event *FileEvent) error { // Read the content of this modified, moved or created file - content, err := s.Filesystem().ReadFile(ctx, event.Path) + content, err := s.FileFinder().Filesystem().ReadFile(ctx, event.Path) if err != nil { return fmt.Errorf("could not read %q: %v", event.Path, err) } From 8fdce647ae64822469da0dcd5c14c0c771b305d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sat, 23 Jan 2021 21:45:20 +0200 Subject: [PATCH 048/149] Move backend and client into their own subdirectories of pkg/storage --- pkg/storage/{ => backend}/backend.go | 127 +++++++++----- pkg/storage/{storage.go => client/client.go} | 165 +++++++------------ pkg/storage/{ => client}/options.go | 2 +- pkg/storage/{ => client}/utils.go | 2 +- pkg/storage/raw/filefinder_simple.go | 7 +- 5 files changed, 153 insertions(+), 150 deletions(-) rename pkg/storage/{ => backend}/backend.go (68%) rename pkg/storage/{storage.go => client/client.go} (65%) rename pkg/storage/{ => client}/options.go (99%) rename pkg/storage/{ => client}/utils.go (96%) diff --git a/pkg/storage/backend.go b/pkg/storage/backend/backend.go similarity index 68% rename from pkg/storage/backend.go rename to pkg/storage/backend/backend.go index da46699c..6706002c 100644 --- a/pkg/storage/backend.go +++ b/pkg/storage/backend/backend.go @@ -1,8 +1,10 @@ -package storage +package backend import ( "bytes" "context" + "errors" + "fmt" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" @@ -12,6 +14,16 @@ import ( 
"k8s.io/apimachinery/pkg/util/sets" ) +var ( + // ErrCannotSaveMetadata is returned if the user tries to save metadata-only objects + ErrCannotSaveMetadata = errors.New("cannot save (Create|Update|Patch) *metav1.PartialObjectMetadata") + // ErrNameRequired is returned when .metadata.name is unset + // TODO: Support generateName? + ErrNameRequired = errors.New(".metadata.name is required") + + namespaceGVK = core.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"} +) + // TODO: Make a *core.Unknown that has // 1. TypeMeta // 2. DeepCopies (for Object compatibility), @@ -20,32 +32,44 @@ import ( // 5. Status { Data []byte, ContentType ContentType, Object interface{} } // TODO: Need to make sure we never write this internal struct to disk (MarshalJSON error?) -type BackendAccessors interface { +type Accessors interface { Storage() raw.Storage NamespaceEnforcer() core.NamespaceEnforcer Scheme() *runtime.Scheme - Validator() BackendValidator +} + +type WriteAccessors interface { + Validator() Validator StorageVersioner() StorageVersioner } -type BackendReader interface { - BackendAccessors +type Reader interface { + Accessors Get(ctx context.Context, obj core.Object) error raw.Lister } -type BackendWriter interface { - BackendAccessors +type Writer interface { + Accessors + WriteAccessors Create(ctx context.Context, obj core.Object) error Update(ctx context.Context, obj core.Object) error Delete(ctx context.Context, obj core.Object) error } +type StatusWriter interface { + Accessors + WriteAccessors + + UpdateStatus(ctx context.Context, obj core.Object) error +} + type Backend interface { - BackendReader - BackendWriter + Reader + Writer + StatusWriter } type ChangeOperation string @@ -56,23 +80,33 @@ const ( ChangeOperationDelete ChangeOperation = "delete" ) -type BackendValidator interface { - ValidateChange(ctx context.Context, backend BackendReader, op ChangeOperation, obj core.Object) error +type Validator interface { + ValidateChange(ctx context.Context, 
backend Reader, op ChangeOperation, obj core.Object) error } type StorageVersioner interface { + // TODO: Do we need the context here? StorageVersion(ctx context.Context, id core.ObjectID) (core.GroupVersion, error) } -func NewGenericBackend( +func NewGeneric( storage raw.Storage, serializer serializer.Serializer, // TODO: only scheme required, encode/decode optional? enforcer core.NamespaceEnforcer, - validator BackendValidator, // TODO: optional? + validator Validator, // TODO: optional? versioner StorageVersioner, // TODO: optional? -) (*GenericBackend, error) { +) (*Generic, error) { + if storage == nil { + return nil, fmt.Errorf("storage is mandatory") + } + if serializer == nil { // TODO: relax this to scheme, and add encoder/decoder to opts? + return nil, fmt.Errorf("serializer is mandatory") + } + if enforcer == nil { + return nil, fmt.Errorf("enforcer is mandatory") + } // TODO: validate options - return &GenericBackend{ + return &Generic{ scheme: serializer.Scheme(), encoder: serializer.Encoder(), decoder: serializer.Decoder(), @@ -84,40 +118,40 @@ func NewGenericBackend( }, nil } -var _ Backend = &GenericBackend{} +var _ Backend = &Generic{} -type GenericBackend struct { +type Generic struct { scheme *runtime.Scheme decoder serializer.Decoder encoder serializer.Encoder storage raw.Storage enforcer core.NamespaceEnforcer - validator BackendValidator + validator Validator versioner StorageVersioner } -func (b *GenericBackend) Scheme() *runtime.Scheme { +func (b *Generic) Scheme() *runtime.Scheme { return b.scheme } -func (b *GenericBackend) Storage() raw.Storage { +func (b *Generic) Storage() raw.Storage { return b.storage } -func (b *GenericBackend) NamespaceEnforcer() core.NamespaceEnforcer { +func (b *Generic) NamespaceEnforcer() core.NamespaceEnforcer { return b.enforcer } -func (b *GenericBackend) Validator() BackendValidator { +func (b *Generic) Validator() Validator { return b.validator } -func (b *GenericBackend) StorageVersioner() StorageVersioner 
{ +func (b *Generic) StorageVersioner() StorageVersioner { return b.versioner } -func (b *GenericBackend) Get(ctx context.Context, obj core.Object) error { +func (b *Generic) Get(ctx context.Context, obj core.Object) error { // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. id, err := b.idForObj(ctx, obj) if err != nil { @@ -144,7 +178,7 @@ func (b *GenericBackend) Get(ctx context.Context, obj core.Object) error { // the caller to make sure they do not call this method for root-spaced // objects; for that the behavior is undefined (but returning an error // is recommended). -func (b *GenericBackend) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { +func (b *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { return b.storage.ListNamespaces(ctx, gk) } @@ -152,11 +186,11 @@ func (b *GenericBackend) ListNamespaces(ctx context.Context, gk core.GroupKind) // For namespaced GroupKinds, the caller must provide a namespace, and for // root-spaced GroupKinds, the caller must not. When namespaced, this function // must only return object keys for that given namespace. 
-func (b *GenericBackend) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { +func (b *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { return b.storage.ListObjectIDs(ctx, gk, namespace) } -func (b *GenericBackend) Create(ctx context.Context, obj core.Object) error { +func (b *Generic) Create(ctx context.Context, obj core.Object) error { // We must never save metadata-only structs if serializer.IsPartialObject(obj) { return ErrCannotSaveMetadata @@ -175,14 +209,16 @@ func (b *GenericBackend) Create(ctx context.Context, obj core.Object) error { // Validate that the change is ok // TODO: Don't make "upcasting" possible here - if err := b.validator.ValidateChange(ctx, b, ChangeOperationCreate, obj); err != nil { - return err + if b.validator != nil { + if err := b.validator.ValidateChange(ctx, b, ChangeOperationCreate, obj); err != nil { + return err + } } // Internal, common write shared with Update() return b.write(ctx, id, obj) } -func (b *GenericBackend) Update(ctx context.Context, obj core.Object) error { +func (b *Generic) Update(ctx context.Context, obj core.Object) error { // We must never save metadata-only structs if serializer.IsPartialObject(obj) { return ErrCannotSaveMetadata @@ -201,25 +237,32 @@ func (b *GenericBackend) Update(ctx context.Context, obj core.Object) error { // Validate that the change is ok // TODO: Don't make "upcasting" possible here - if err := b.validator.ValidateChange(ctx, b, ChangeOperationUpdate, obj); err != nil { - return err + if b.validator != nil { + if err := b.validator.ValidateChange(ctx, b, ChangeOperationUpdate, obj); err != nil { + return err + } } // Internal, common write shared with Create() return b.write(ctx, id, obj) } -func (b *GenericBackend) write(ctx context.Context, id core.ObjectID, obj core.Object) error { +func (b *Generic) UpdateStatus(ctx context.Context, obj core.Object) error 
{ + return core.ErrNotImplemented // TODO +} + +func (b *Generic) write(ctx context.Context, id core.ObjectID, obj core.Object) error { // TODO: Figure out how to get ContentType before the object actually exists! ct, err := b.storage.ContentType(ctx, id) if err != nil { return err } - // Get the given storage version + // Resolve the desired storage version + /* TODO: re-enable later gv, err := b.versioner.StorageVersion(ctx, id) if err != nil { return err - } + }*/ // Set creationTimestamp if not already populated t := obj.GetCreationTimestamp() @@ -229,7 +272,7 @@ func (b *GenericBackend) write(ctx context.Context, id core.ObjectID, obj core.O var objBytes bytes.Buffer // TODO: Work with any ContentType, not just JSON/YAML. Or, make a SingleFrameWriter for any ct. - err = b.encoder.EncodeForGroupVersion(serializer.NewFrameWriter(ct, &objBytes), obj, gv) + err = b.encoder.Encode(serializer.NewFrameWriter(ct, &objBytes), obj) if err != nil { return err } @@ -237,7 +280,7 @@ func (b *GenericBackend) write(ctx context.Context, id core.ObjectID, obj core.O return b.storage.Write(ctx, id, objBytes.Bytes()) } -func (b *GenericBackend) Delete(ctx context.Context, obj core.Object) error { +func (b *Generic) Delete(ctx context.Context, obj core.Object) error { // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. 
id, err := b.idForObj(ctx, obj) if err != nil { @@ -251,8 +294,10 @@ func (b *GenericBackend) Delete(ctx context.Context, obj core.Object) error { // Validate that the change is ok // TODO: Don't make "upcasting" possible here - if err := b.validator.ValidateChange(ctx, b, ChangeOperationDelete, obj); err != nil { - return err + if b.validator != nil { + if err := b.validator.ValidateChange(ctx, b, ChangeOperationDelete, obj); err != nil { + return err + } } // Delete it from the underlying storage @@ -260,7 +305,7 @@ func (b *GenericBackend) Delete(ctx context.Context, obj core.Object) error { } // Note: This should also work for unstructured and partial metadata objects -func (b *GenericBackend) idForObj(ctx context.Context, obj core.Object) (core.ObjectID, error) { +func (b *Generic) idForObj(ctx context.Context, obj core.Object) (core.ObjectID, error) { gvk, err := serializer.GVKForObject(b.scheme, obj) if err != nil { return nil, err @@ -281,7 +326,7 @@ func (b *GenericBackend) idForObj(ctx context.Context, obj core.Object) (core.Ob // If the namespace enforcer requires listing all the other namespaces, // look them up if b.enforcer.RequireSetNamespaceExists() { - objIDs, err := b.storage.ListObjectIDs(ctx, v1GroupKind.WithKind("Namespace").GroupKind(), "") + objIDs, err := b.storage.ListObjectIDs(ctx, namespaceGVK.GroupKind(), "") if err != nil { return nil, err } diff --git a/pkg/storage/storage.go b/pkg/storage/client/client.go similarity index 65% rename from pkg/storage/storage.go rename to pkg/storage/client/client.go index 9b9bde35..0f48974e 100644 --- a/pkg/storage/storage.go +++ b/pkg/storage/client/client.go @@ -1,4 +1,4 @@ -package storage +package client import ( "context" @@ -9,110 +9,77 @@ import ( "github.com/fluxcd/go-git-providers/validation" "github.com/weaveworks/libgitops/pkg/filter" "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage/backend" "github.com/weaveworks/libgitops/pkg/storage/core" 
patchutil "github.com/weaveworks/libgitops/pkg/util/patch" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" kruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" ) -// TODO: Rename to Client? Talk objects to the "Storage" part instead? -// TODO: Make it possible to specify the "storage version" manually? // TODO: Pass an ObjectID that contains all PartialObjectMetadata info for "downstream" consumers // that can make use of it by "casting up". var ( - // ErrCannotSaveMetadata is returned if the user tries to save metadata-only objects - ErrCannotSaveMetadata = errors.New("cannot save (Create|Update|Patch) *metav1.PartialObjectMetadata") - // ErrNameRequired is returned when .metadata.name is unset - // TODO: Support generateName? - ErrNameRequired = errors.New(".metadata.name is required") // ErrUnsupportedPatchType is returned when an unsupported patch type is used ErrUnsupportedPatchType = errors.New("unsupported patch type") ) -const ( - namespaceListKind = "NamespaceList" -) - -var v1GroupKind = schema.GroupVersion{Group: "", Version: "v1"} - -type CommonStorage interface { - // - // Access to underlying Resources. - // - - // RawStorage returns the RawStorage instance backing this Storage - // It is expected that RawStorage only operates on one "frame" at a time in its Read/Write operations. - //RawStorage() raw.Storage - // Serializer returns the serializer - //Serializer() serializer.Serializer - Backend() Backend - - // - // Misc methods. - // - - // Close closes all underlying resources (e.g. goroutines) used; before the application exits - // TODO: Maybe this instead should apply to raw.Storage's now? 
- Close() error - // io.Closer -} - -// ReadStorage TODO -type ReadStorage interface { - CommonStorage - +type Reader interface { client.Reader - // TODO: In the future to support indexing "custom" fields. - // Normal fields (not counting arrays) could be supported using - // kruntime.DefaultUnstructuredConverter.ToUnstructured() in - // filter.FieldFilter - // client.FieldIndexer + BackendReader() backend.Reader } -type WriteStorage interface { - CommonStorage +type Writer interface { client.Writer - //client.StatusClient + BackendWriter() backend.Writer +} + +type StatusClient interface { + client.StatusClient + BackendStatusWriter() backend.StatusWriter } -// Storage is an interface for persisting and retrieving API objects to/from a backend -// One Storage instance handles all different Kinds of Objects -type Storage interface { - ReadStorage - WriteStorage +// Client is an interface for persisting and retrieving API objects to/from a backend +// One Client instance handles all different Kinds of Objects +type Client interface { + Reader + Writer + // TODO: StatusClient //client.Client } -// NewGenericStorage constructs a new Storage -func NewGenericStorage(backend Backend, patcher serializer.Patcher) Storage { - return &storage{backend, patcher} +// NewGeneric constructs a new Generic client +// TODO: Construct the default patcher from the given scheme, make patcher an opt instead +func NewGeneric(backend backend.Backend, patcher serializer.Patcher) (*Generic, error) { + if backend == nil { + return nil, fmt.Errorf("backend is mandatory") + } + return &Generic{backend, patcher}, nil } -// storage implements the Storage interface -type storage struct { - backend Backend +// Generic implements the Client interface +type Generic struct { + backend backend.Backend patcher serializer.Patcher } -var _ Storage = &storage{} +var _ Client = &Generic{} -func (s *storage) Backend() Backend { - return s.backend -} +func (c *Generic) Backend() backend.Backend { return 
c.backend } +func (c *Generic) BackendReader() backend.Reader { return c.backend } +func (c *Generic) BackendWriter() backend.Writer { return c.backend } // Get returns a new Object for the resource at the specified kind/uid path, based on the file content. // In order to only extract the metadata of this object, pass in a *metav1.PartialObjectMetadata -func (s *storage) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { +func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { obj.SetName(key.Name) obj.SetNamespace(key.Namespace) - return s.backend.Get(ctx, obj) + return c.backend.Get(ctx, obj) } // List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package @@ -123,9 +90,9 @@ func (s *storage) Get(ctx context.Context, key core.ObjectKey, obj core.Object) // you need to populate TypeMeta with the GVK you want back. // TODO: Check if this works with metav1.List{} // TODO: Create constructors for the different kinds of lists? -func (s *storage) List(ctx context.Context, list core.ObjectList, opts ...client.ListOption) error { +func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...client.ListOption) error { // This call will verify that list actually is a List type. 
- gvk, err := serializer.GVKForList(list, s.backend.Scheme()) + gvk, err := serializer.GVKForList(list, c.Backend().Scheme()) if err != nil { return err } @@ -134,7 +101,7 @@ func (s *storage) List(ctx context.Context, list core.ObjectList, opts ...client // Get namespacing info gk := gvk.GroupKind() - namespaced, err := s.backend.Storage().Namespacer().IsNamespaced(gk) + namespaced, err := c.Backend().Storage().Namespacer().IsNamespaced(gk) if err != nil { return err } @@ -145,7 +112,7 @@ func (s *storage) List(ctx context.Context, list core.ObjectList, opts ...client // However, if the GroupKind is namespaced, and the given "filter namespace" in list // options is empty, it means that one should list all namespaces if namespaced && listOpts.Namespace == "" { - namespaces, err = s.backend.ListNamespaces(ctx, gk) + namespaces, err = c.Backend().ListNamespaces(ctx, gk) if err != nil { return err } @@ -155,7 +122,7 @@ func (s *storage) List(ctx context.Context, list core.ObjectList, opts ...client allIDs := []core.UnversionedObjectID{} for ns := range namespaces { - ids, err := s.backend.ListObjectIDs(ctx, gk, ns) + ids, err := c.Backend().ListObjectIDs(ctx, gk, ns) if err != nil { return err } @@ -168,13 +135,13 @@ func (s *storage) List(ctx context.Context, list core.ObjectList, opts ...client wg.Add(1) var processErr error go func() { - createFunc := createObject(gvk, s.backend.Scheme()) + createFunc := createObject(gvk, c.Backend().Scheme()) if serializer.IsPartialObjectList(list) { createFunc = createPartialObject(gvk) } else if serializer.IsUnstructuredList(list) { createFunc = createUnstructuredObject(gvk) } - processErr = s.processKeys(ctx, allIDs, &listOpts.FilterOptions, createFunc, ch) + processErr = c.processKeys(ctx, allIDs, &listOpts.FilterOptions, createFunc, ch) wg.Done() }() @@ -193,19 +160,19 @@ func (s *storage) List(ctx context.Context, list core.ObjectList, opts ...client return nil } -func (s *storage) Create(ctx context.Context, obj 
core.Object, _ ...client.CreateOption) error { - return s.backend.Create(ctx, obj) +func (c *Generic) Create(ctx context.Context, obj core.Object, _ ...client.CreateOption) error { + return c.backend.Create(ctx, obj) } -func (s *storage) Update(ctx context.Context, obj core.Object, _ ...client.UpdateOption) error { - return s.backend.Update(ctx, obj) +func (c *Generic) Update(ctx context.Context, obj core.Object, _ ...client.UpdateOption) error { + return c.backend.Update(ctx, obj) } // Patch performs a strategic merge patch on the object with the given UID, using the byte-encoded patch given -func (s *storage) Patch(ctx context.Context, obj core.Object, patch core.Patch, _ ...client.PatchOption) error { +func (c *Generic) Patch(ctx context.Context, obj core.Object, patch core.Patch, _ ...client.PatchOption) error { // Fail-fast: We must never save metadata-only structs if serializer.IsPartialObject(obj) { - return ErrCannotSaveMetadata + return backend.ErrCannotSaveMetadata } // Acquire the patch data from the "desired state" object given now, i.e. in MergeFrom{} @@ -217,7 +184,7 @@ func (s *storage) Patch(ctx context.Context, obj core.Object, patch core.Patch, // Load the current latest state into obj temporarily, before patching it // This also validates the GVK, name and namespace. 
- if err := s.backend.Get(ctx, obj); err != nil { + if err := c.backend.Get(ctx, obj); err != nil { return err } @@ -231,35 +198,34 @@ func (s *storage) Patch(ctx context.Context, obj core.Object, patch core.Patch, // Apply the patch into the object using the given byte patcher if unstruct, ok := obj.(kruntime.Unstructured); ok { // TODO: Provide an option for the schema - err = s.patcher.ApplyOnUnstructured(bytePatcher, patchJSON, unstruct, nil) + err = c.patcher.ApplyOnUnstructured(bytePatcher, patchJSON, unstruct, nil) } else { - err = s.patcher.ApplyOnStruct(bytePatcher, patchJSON, obj) + err = c.patcher.ApplyOnStruct(bytePatcher, patchJSON, obj) } if err != nil { return err } // Perform an update internally, similar to what .Update would yield - // TODO: Maybe write to storage conditionally? using DryRun all - return s.Update(ctx, obj) - //return s.update(ctx, obj, id) + // TODO: Maybe write to the Storage conditionally? using DryRun all + return c.Update(ctx, obj) } -// Delete removes an Object from the storage +// Delete removes an Object from the backend // PartialObjectMetadata should work here. -func (s *storage) Delete(ctx context.Context, obj core.Object, _ ...client.DeleteOption) error { - return s.backend.Delete(ctx, obj) +func (c *Generic) Delete(ctx context.Context, obj core.Object, _ ...client.DeleteOption) error { + return c.backend.Delete(ctx, obj) } // DeleteAllOf deletes all matched resources by first doing a List() operation on the given GVK of // obj (obj is not used for anything else) and the given filters in opts. 
Only the Partial Meta -func (s *storage) DeleteAllOf(ctx context.Context, obj core.Object, opts ...client.DeleteAllOfOption) error { +func (c *Generic) DeleteAllOf(ctx context.Context, obj core.Object, opts ...client.DeleteAllOfOption) error { // This applies both upstream and custom options, and propagates the options correctly to both // List() and Delete() customDeleteAllOpts := (&DeleteAllOfOptions{}).ApplyOptions(opts) // Get the GVK of the object - gvk, err := serializer.GVKForObject(s.backend.Scheme(), obj) + gvk, err := serializer.GVKForObject(c.Backend().Scheme(), obj) if err != nil { return err } @@ -268,31 +234,26 @@ func (s *storage) DeleteAllOf(ctx context.Context, obj core.Object, opts ...clie // UnstructuredList is used here so that we can use filters that operate on fields list := &unstructured.UnstructuredList{} list.SetGroupVersionKind(gvk) - if err := s.List(ctx, list, customDeleteAllOpts); err != nil { + if err := c.List(ctx, list, customDeleteAllOpts); err != nil { return err } // Loop through all of the matched items, and Delete them one-by-one for i := range list.Items { - if err := s.Delete(ctx, &list.Items[i], customDeleteAllOpts); err != nil { + if err := c.Delete(ctx, &list.Items[i], customDeleteAllOpts); err != nil { return err } } return nil } -// Close closes all underlying resources (e.g. goroutines) used; before the application exits -func (s *storage) Close() error { - return nil // nothing to do here for storage -} - // Scheme returns the scheme this client is using. -func (s *storage) Scheme() *kruntime.Scheme { - return s.backend.Scheme() +func (c *Generic) Scheme() *kruntime.Scheme { + return c.backend.Scheme() } // RESTMapper returns the rest this client is using. For now, this returns nil, so don't use. 
-func (s *storage) RESTMapper() meta.RESTMapper { +func (c *Generic) RESTMapper() meta.RESTMapper { return nil } @@ -320,7 +281,7 @@ func createUnstructuredObject(gvk core.GroupVersionKind) newObjectFunc { } } -func (s *storage) processKeys(ctx context.Context, ids []core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) error { +func (c *Generic) processKeys(ctx context.Context, ids []core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) error { wg := &sync.WaitGroup{} wg.Add(len(ids)) multiErr := &validation.MultiError{} // TODO: Thread-safe append @@ -335,7 +296,7 @@ func (s *storage) processKeys(ctx context.Context, ids []core.UnversionedObjectI return } - if err := s.Get(ctx, id.ObjectKey(), obj); err != nil { + if err := c.Get(ctx, id.ObjectKey(), obj); err != nil { multiErr.Errors = append(multiErr.Errors, err) return } diff --git a/pkg/storage/options.go b/pkg/storage/client/options.go similarity index 99% rename from pkg/storage/options.go rename to pkg/storage/client/options.go index 6768a0a9..7fa8f8ed 100644 --- a/pkg/storage/options.go +++ b/pkg/storage/client/options.go @@ -1,4 +1,4 @@ -package storage +package client import ( "github.com/weaveworks/libgitops/pkg/filter" diff --git a/pkg/storage/utils.go b/pkg/storage/client/utils.go similarity index 96% rename from pkg/storage/utils.go rename to pkg/storage/client/utils.go index dd5396cd..da869085 100644 --- a/pkg/storage/utils.go +++ b/pkg/storage/client/utils.go @@ -1,4 +1,4 @@ -package storage +package client import ( "errors" diff --git a/pkg/storage/raw/filefinder_simple.go b/pkg/storage/raw/filefinder_simple.go index 13df0f4b..62046136 100644 --- a/pkg/storage/raw/filefinder_simple.go +++ b/pkg/storage/raw/filefinder_simple.go @@ -17,12 +17,9 @@ import ( // using SimpleFileFinder as the FileFinder, and the local disk as target. 
// If you need more advanced customizablility than provided here, you can compose // the call to NewGenericFilesystemStorage yourself. -func NewSimpleStorage(dir string, ct serializer.ContentType, namespacer core.Namespacer) (FilesystemStorage, error) { +func NewSimpleStorage(dir string, namespacer core.Namespacer, opts SimpleFileFinderOptions) (FilesystemStorage, error) { fs := core.AferoContextForLocalDir(dir) - fileFinder, err := NewSimpleFileFinder(fs, SimpleFileFinderOptions{ - // ContentType is optional; JSON is used by default - ContentType: ct, - }) + fileFinder, err := NewSimpleFileFinder(fs, opts) if err != nil { return nil, err } From dc9d245436f45649460e4bc3821f84cc2921a911 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sat, 23 Jan 2021 22:12:10 +0200 Subject: [PATCH 049/149] Move directories around; make pkg/storage top-level for abstract storages, and move everything filesystem-related under pkg/storage/filesystem. --- pkg/storage/backend/backend.go | 1 - pkg/storage/{raw/watch/events.go => event.go} | 37 +----- pkg/storage/{core => filesystem}/afero.go | 2 +- .../{core => filesystem}/dir_traversal.go | 2 +- .../{raw => filesystem}/filefinder_simple.go | 26 ++-- pkg/storage/{core => filesystem}/format.go | 2 +- pkg/storage/filesystem/interfaces.go | 46 +++++++ .../{core => filesystem}/path_excluder.go | 2 +- .../path_excluder_test.go | 2 +- .../rawstorage.go => filesystem/storage.go} | 58 +++++---- .../unstructured}/filefinder_mapped.go | 13 +- .../filesystem/unstructured/interfaces.go | 62 ++++++++++ .../unstructured}/mapped_cache.go | 2 +- pkg/storage/filesystem/watch/events.go | 36 ++++++ .../watch/inotify/filewatcher.go | 2 +- .../watch/inotify/filewatcher_test.go | 2 +- .../watch/inotify/options.go | 0 .../{raw => filesystem}/watch/interfaces.go | 42 ++----- .../watch/manifest/manifest.go | 7 +- .../{raw => filesystem}/watch/watch.go | 5 +- pkg/storage/{raw => }/interfaces.go | 114 ++++-------------- pkg/storage/{raw 
=> }/objectinfo.go | 11 +- 22 files changed, 249 insertions(+), 225 deletions(-) rename pkg/storage/{raw/watch/events.go => event.go} (56%) rename pkg/storage/{core => filesystem}/afero.go (99%) rename pkg/storage/{core => filesystem}/dir_traversal.go (98%) rename pkg/storage/{raw => filesystem}/filefinder_simple.go (91%) rename pkg/storage/{core => filesystem}/format.go (99%) create mode 100644 pkg/storage/filesystem/interfaces.go rename pkg/storage/{core => filesystem}/path_excluder.go (98%) rename pkg/storage/{core => filesystem}/path_excluder_test.go (98%) rename pkg/storage/{raw/rawstorage.go => filesystem/storage.go} (68%) rename pkg/storage/{raw => filesystem/unstructured}/filefinder_mapped.go (93%) create mode 100644 pkg/storage/filesystem/unstructured/interfaces.go rename pkg/storage/{raw => filesystem/unstructured}/mapped_cache.go (98%) create mode 100644 pkg/storage/filesystem/watch/events.go rename pkg/storage/{raw => filesystem}/watch/inotify/filewatcher.go (99%) rename pkg/storage/{raw => filesystem}/watch/inotify/filewatcher_test.go (97%) rename pkg/storage/{raw => filesystem}/watch/inotify/options.go (100%) rename pkg/storage/{raw => filesystem}/watch/interfaces.go (63%) rename pkg/storage/{raw => filesystem}/watch/manifest/manifest.go (79%) rename pkg/storage/{raw => filesystem}/watch/watch.go (98%) rename pkg/storage/{raw => }/interfaces.go (57%) rename pkg/storage/{raw => }/objectinfo.go (70%) diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 6706002c..29d49a99 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -8,7 +8,6 @@ import ( "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" - "github.com/weaveworks/libgitops/pkg/storage/raw" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" diff --git a/pkg/storage/raw/watch/events.go b/pkg/storage/event.go similarity index 
56% rename from pkg/storage/raw/watch/events.go rename to pkg/storage/event.go index 7f60c087..92881ee2 100644 --- a/pkg/storage/raw/watch/events.go +++ b/pkg/storage/event.go @@ -1,4 +1,4 @@ -package watch +package storage import ( "fmt" @@ -6,41 +6,6 @@ import ( "github.com/weaveworks/libgitops/pkg/storage/core" ) -// FileEventType is an enum describing a change in a file's state -type FileEventType byte - -const ( - FileEventNone FileEventType = iota // 0 - FileEventModify // 1 - FileEventDelete // 2 - FileEventMove // 3 -) - -func (e FileEventType) String() string { - switch e { - case 0: - return "NONE" - case 1: - return "MODIFY" - case 2: - return "DELETE" - case 3: - return "MOVE" - } - - return "UNKNOWN" -} - -// FileEvent describes a file change of a certain kind at a certain -// (relative) path. Often emitted by FileEventsEmitter. -type FileEvent struct { - Path string - Type FileEventType -} - -// FileEventStream is a channel of FileEvents -type FileEventStream chan *FileEvent - // ObjectEventType is an enum describing a change in an Object's state. 
type ObjectEventType byte diff --git a/pkg/storage/core/afero.go b/pkg/storage/filesystem/afero.go similarity index 99% rename from pkg/storage/core/afero.go rename to pkg/storage/filesystem/afero.go index 2e0a9475..71b071c4 100644 --- a/pkg/storage/core/afero.go +++ b/pkg/storage/filesystem/afero.go @@ -1,4 +1,4 @@ -package core +package filesystem import ( "context" diff --git a/pkg/storage/core/dir_traversal.go b/pkg/storage/filesystem/dir_traversal.go similarity index 98% rename from pkg/storage/core/dir_traversal.go rename to pkg/storage/filesystem/dir_traversal.go index 8e13ade4..497c4d60 100644 --- a/pkg/storage/core/dir_traversal.go +++ b/pkg/storage/filesystem/dir_traversal.go @@ -1,4 +1,4 @@ -package core +package filesystem import ( "context" diff --git a/pkg/storage/raw/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go similarity index 91% rename from pkg/storage/raw/filefinder_simple.go rename to pkg/storage/filesystem/filefinder_simple.go index 62046136..d0b990e1 100644 --- a/pkg/storage/raw/filefinder_simple.go +++ b/pkg/storage/filesystem/filefinder_simple.go @@ -1,4 +1,4 @@ -package raw +package filesystem import ( "context" @@ -13,21 +13,21 @@ import ( "k8s.io/apimachinery/pkg/util/sets" ) -// NewSimpleStorage is a default opinionated constructor for a FilesystemStorage +// NewSimpleStorage is a default opinionated constructor for a Storage // using SimpleFileFinder as the FileFinder, and the local disk as target. // If you need more advanced customizablility than provided here, you can compose -// the call to NewGenericFilesystemStorage yourself. -func NewSimpleStorage(dir string, namespacer core.Namespacer, opts SimpleFileFinderOptions) (FilesystemStorage, error) { - fs := core.AferoContextForLocalDir(dir) +// the call to NewGenericStorage yourself. 
+func NewSimpleStorage(dir string, namespacer core.Namespacer, opts SimpleFileFinderOptions) (Storage, error) { + fs := AferoContextForLocalDir(dir) fileFinder, err := NewSimpleFileFinder(fs, opts) if err != nil { return nil, err } - // fileFinder and namespacer are validated by NewGenericFilesystemStorage. - return NewGenericFilesystemStorage(fileFinder, namespacer) + // fileFinder and namespacer are validated by NewGenericStorage. + return NewGeneric(fileFinder, namespacer) } -func NewSimpleFileFinder(fs core.AferoContext, opts SimpleFileFinderOptions) (*SimpleFileFinder, error) { +func NewSimpleFileFinder(fs AferoContext, opts SimpleFileFinderOptions) (*SimpleFileFinder, error) { if fs == nil { return nil, fmt.Errorf("NewSimpleFileFinder: fs is mandatory") } @@ -68,7 +68,7 @@ var _ FileFinder = &SimpleFileFinder{} // // This FileFinder does not support the ObjectAt method. type SimpleFileFinder struct { - fs core.AferoContext + fs AferoContext opts SimpleFileFinderOptions } @@ -80,12 +80,12 @@ type SimpleFileFinderOptions struct { // Default: serializer.ContentTypeJSON ContentType serializer.ContentType // Default: DefaultFileExtensionResolver - FileExtensionResolver core.FileExtensionResolver + FileExtensionResolver FileExtensionResolver } // TODO: Use group name "core" if group is "" to support core k8s objects. 
-func (f *SimpleFileFinder) Filesystem() core.AferoContext { +func (f *SimpleFileFinder) Filesystem() AferoContext { return f.fs } @@ -137,7 +137,7 @@ func (f *SimpleFileFinder) ContentType(ctx context.Context, _ core.UnversionedOb func (f *SimpleFileFinder) ext() (string, error) { resolver := f.opts.FileExtensionResolver if resolver == nil { - resolver = core.DefaultFileExtensionResolver + resolver = DefaultFileExtensionResolver } ext, err := resolver.ExtensionForContentType(f.contentType()) if err != nil { @@ -214,7 +214,7 @@ func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, return ids, nil } -func readDir(ctx context.Context, fs core.AferoContext, dir string) ([]string, error) { +func readDir(ctx context.Context, fs AferoContext, dir string) ([]string, error) { fi, err := fs.Stat(ctx, dir) if os.IsNotExist(err) { // It's ok if the directory doesn't exist (yet), we just don't have any items then :) diff --git a/pkg/storage/core/format.go b/pkg/storage/filesystem/format.go similarity index 99% rename from pkg/storage/core/format.go rename to pkg/storage/filesystem/format.go index 12c27a45..f5fd4a30 100644 --- a/pkg/storage/core/format.go +++ b/pkg/storage/filesystem/format.go @@ -1,4 +1,4 @@ -package core +package filesystem import ( "context" diff --git a/pkg/storage/filesystem/interfaces.go b/pkg/storage/filesystem/interfaces.go new file mode 100644 index 00000000..286a0d64 --- /dev/null +++ b/pkg/storage/filesystem/interfaces.go @@ -0,0 +1,46 @@ +package filesystem + +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/storage" + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +// Storage (in this filesystem package) extends storage.Storage by specializing it to operate in a +// filesystem context, and in other words use a FileFinder to locate the +// files to operate on. +type Storage interface { + storage.Storage + + // FileFinder returns the underlying FileFinder used. 
+ // TODO: Maybe one Storage can have multiple FileFinders? + FileFinder() FileFinder +} + +// FileFinder is a generic implementation for locating files on disk, to be +// used by a Storage. +// +// Important: The caller MUST guarantee that the implementation can figure +// out if the GroupKind is namespaced or not by the following check: +// +// namespaced := id.ObjectKey().Namespace != "" +// +// In other words, the caller must enforce a namespace being set for namespaced +// kinds, and namespace not being set for non-namespaced kinds. +type FileFinder interface { + // Filesystem gets the underlying filesystem abstraction, if + // applicable. + Filesystem() AferoContext + + // ObjectPath gets the file path relative to the root directory. + // In order to support a create operation, this function must also return a valid path for + // files that do not yet exist on disk. + ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) + // ObjectAt retrieves the ID based on the given relative file path to fs. 
+ ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) + // The FileFinder should be able to resolve the content type for various IDs + storage.ContentTypeResolver + // The FileFinder should be able to list namespaces and Object IDs + storage.Lister +} diff --git a/pkg/storage/core/path_excluder.go b/pkg/storage/filesystem/path_excluder.go similarity index 98% rename from pkg/storage/core/path_excluder.go rename to pkg/storage/filesystem/path_excluder.go index 3b51e52a..48a3df1e 100644 --- a/pkg/storage/core/path_excluder.go +++ b/pkg/storage/filesystem/path_excluder.go @@ -1,4 +1,4 @@ -package core +package filesystem import ( "context" diff --git a/pkg/storage/core/path_excluder_test.go b/pkg/storage/filesystem/path_excluder_test.go similarity index 98% rename from pkg/storage/core/path_excluder_test.go rename to pkg/storage/filesystem/path_excluder_test.go index 8121ac89..d253dbdf 100644 --- a/pkg/storage/core/path_excluder_test.go +++ b/pkg/storage/filesystem/path_excluder_test.go @@ -1,4 +1,4 @@ -package core +package filesystem import ( "context" diff --git a/pkg/storage/raw/rawstorage.go b/pkg/storage/filesystem/storage.go similarity index 68% rename from pkg/storage/raw/rawstorage.go rename to pkg/storage/filesystem/storage.go index fe2392f9..a5e4e400 100644 --- a/pkg/storage/raw/rawstorage.go +++ b/pkg/storage/filesystem/storage.go @@ -1,4 +1,4 @@ -package raw +package filesystem import ( "context" @@ -8,47 +8,48 @@ import ( "strconv" "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" "k8s.io/apimachinery/pkg/util/sets" ) -// NewGenericFilesystemStorage creates a new GenericFilesystemStorage using the given lower-level +// NewGeneric creates a new Generic using the given lower-level // FileFinder and Namespacer. 
-func NewGenericFilesystemStorage(fileFinder FileFinder, namespacer core.Namespacer) (FilesystemStorage, error) { +func NewGeneric(fileFinder FileFinder, namespacer core.Namespacer) (Storage, error) { if fileFinder == nil { - return nil, fmt.Errorf("NewGenericFilesystemStorage: fileFinder is mandatory") + return nil, fmt.Errorf("NewGeneric: fileFinder is mandatory") } if namespacer == nil { - return nil, fmt.Errorf("NewGenericFilesystemStorage: namespacer is mandatory") + return nil, fmt.Errorf("NewGeneric: namespacer is mandatory") } - return &GenericFilesystemStorage{ + return &Generic{ fileFinder: fileFinder, namespacer: namespacer, }, nil } -// GenericFilesystemStorage is a FilesystemStorage-compliant implementation, that +// Generic is a Storage-compliant implementation, that // combines the given lower-level FileFinder, Namespacer and AferoContext interfaces // in a generic manner. // // Checksum is calculated based on the modification timestamp of the file, or // alternatively, from info.Sys() returned from AferoContext.Stat(), if it can // be cast to a ChecksumContainer. 
-type GenericFilesystemStorage struct { +type Generic struct { fileFinder FileFinder namespacer core.Namespacer } -func (r *GenericFilesystemStorage) Namespacer() core.Namespacer { +func (r *Generic) Namespacer() core.Namespacer { return r.namespacer } -func (r *GenericFilesystemStorage) FileFinder() FileFinder { +func (r *Generic) FileFinder() FileFinder { return r.fileFinder } -func (r *GenericFilesystemStorage) Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) { +func (r *Generic) Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) { // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { @@ -62,7 +63,7 @@ func (r *GenericFilesystemStorage) Read(ctx context.Context, id core.Unversioned return r.FileFinder().Filesystem().ReadFile(ctx, p) } -func (r *GenericFilesystemStorage) Exists(ctx context.Context, id core.UnversionedObjectID) bool { +func (r *Generic) Exists(ctx context.Context, id core.UnversionedObjectID) bool { // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { @@ -71,12 +72,12 @@ func (r *GenericFilesystemStorage) Exists(ctx context.Context, id core.Unversion return r.exists(ctx, p) } -func (r *GenericFilesystemStorage) exists(ctx context.Context, path string) bool { +func (r *Generic) exists(ctx context.Context, path string) bool { exists, _ := r.FileFinder().Filesystem().Exists(ctx, path) return exists } -func (r *GenericFilesystemStorage) Stat(ctx context.Context, id core.UnversionedObjectID) (ObjectInfo, error) { +func (r *Generic) Stat(ctx context.Context, id core.UnversionedObjectID) (storage.ObjectInfo, error) { // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { @@ -94,7 +95,7 @@ func (r *GenericFilesystemStorage) Stat(ctx context.Context, id core.Unversioned // Get checksum checksum := checksumFromFileInfo(info) // Allow a custom implementation of afero return ObjectInfo directly - if chk, ok := 
info.Sys().(ChecksumContainer); ok { + if chk, ok := info.Sys().(storage.ChecksumContainer); ok { checksum = chk.Checksum() } @@ -104,15 +105,10 @@ func (r *GenericFilesystemStorage) Stat(ctx context.Context, id core.Unversioned return nil, err } - return &objectInfo{ - ct: contentType, - checksum: checksum, - filepath: p, - id: id, - }, nil + return storage.NewObjectInfo(contentType, checksum, p, id), nil } -func (r *GenericFilesystemStorage) ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) { +func (r *Generic) ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) { // Verify namespacing info if err := r.verifyID(id); err != nil { return "", err @@ -121,7 +117,7 @@ func (r *GenericFilesystemStorage) ContentType(ctx context.Context, id core.Unve return r.FileFinder().ContentType(ctx, id) } -func (r *GenericFilesystemStorage) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error { +func (r *Generic) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error { // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { @@ -138,7 +134,7 @@ func (r *GenericFilesystemStorage) Write(ctx context.Context, id core.Unversione return r.FileFinder().Filesystem().WriteFile(ctx, p, content, 0664) } -func (r *GenericFilesystemStorage) Delete(ctx context.Context, id core.UnversionedObjectID) error { +func (r *Generic) Delete(ctx context.Context, id core.UnversionedObjectID) error { // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { @@ -158,14 +154,14 @@ func (r *GenericFilesystemStorage) Delete(ctx context.Context, id core.Unversion // the caller to make sure they do not call this method for root-spaced // objects; for that the behavior is undefined (but returning an error // is recommended). 
-func (r *GenericFilesystemStorage) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { +func (r *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { namespaced, err := r.namespacer.IsNamespaced(gk) if err != nil { return nil, err } // Validate the groupkind if !namespaced { - return nil, fmt.Errorf("%w: cannot list namespaces for non-namespaced kind: %v", ErrNamespacedMismatch, gk) + return nil, fmt.Errorf("%w: cannot list namespaces for non-namespaced kind: %v", storage.ErrNamespacedMismatch, gk) } // Just use the underlying filefinder return r.FileFinder().ListNamespaces(ctx, gk) @@ -175,7 +171,7 @@ func (r *GenericFilesystemStorage) ListNamespaces(ctx context.Context, gk core.G // For namespaced GroupKinds, the caller must provide a namespace, and for // root-spaced GroupKinds, the caller must not. When namespaced, this function // must only return object IDs for that given namespace. -func (r *GenericFilesystemStorage) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { +func (r *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { // Validate the namespace parameter if err := VerifyNamespaced(r.Namespacer(), gk, namespace); err != nil { return nil, err @@ -184,7 +180,7 @@ func (r *GenericFilesystemStorage) ListObjectIDs(ctx context.Context, gk core.Gr return r.FileFinder().ListObjectIDs(ctx, gk, namespace) } -func (r *GenericFilesystemStorage) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { +func (r *Generic) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { // Verify namespacing info if err := r.verifyID(id); err != nil { return "", err @@ -193,7 +189,7 @@ func (r *GenericFilesystemStorage) getPath(ctx context.Context, id core.Unversio return r.FileFinder().ObjectPath(ctx, id) } -func (r *GenericFilesystemStorage) 
verifyID(id core.UnversionedObjectID) error { +func (r *Generic) verifyID(id core.UnversionedObjectID) error { return VerifyNamespaced(r.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace) } @@ -211,9 +207,9 @@ func VerifyNamespaced(namespacer core.Namespacer, gk core.GroupKind, ns string) return err } if namespaced && ns == "" { - return fmt.Errorf("%w: namespaced kind %v requires non-empty namespace", ErrNamespacedMismatch, gk) + return fmt.Errorf("%w: namespaced kind %v requires non-empty namespace", storage.ErrNamespacedMismatch, gk) } else if !namespaced && ns != "" { - return fmt.Errorf("%w: non-namespaced kind %v must not have namespace parameter set", ErrNamespacedMismatch, gk) + return fmt.Errorf("%w: non-namespaced kind %v must not have namespace parameter set", storage.ErrNamespacedMismatch, gk) } return nil } diff --git a/pkg/storage/raw/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go similarity index 93% rename from pkg/storage/raw/filefinder_mapped.go rename to pkg/storage/filesystem/unstructured/filefinder_mapped.go index 0913c3df..7c1a7430 100644 --- a/pkg/storage/raw/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -1,4 +1,4 @@ -package raw +package unstructured import ( "context" @@ -7,6 +7,7 @@ import ( "github.com/fluxcd/go-git-providers/validation" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" "k8s.io/apimachinery/pkg/util/sets" ) @@ -21,9 +22,9 @@ var _ MappedFileFinder = &GenericMappedFileFinder{} // NewGenericMappedFileFinder creates a new instance of GenericMappedFileFinder, // that implements the MappedFileFinder interface. The contentTyper is optional, // by default core.DefaultContentTyper will be used. 
-func NewGenericMappedFileFinder(contentTyper core.ContentTyper, fs core.AferoContext) MappedFileFinder { +func NewGenericMappedFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.AferoContext) MappedFileFinder { if contentTyper == nil { - contentTyper = core.DefaultContentTyper + contentTyper = filesystem.DefaultContentTyper } if fs == nil { panic("NewGenericMappedFileFinder: fs is mandatory") @@ -47,13 +48,13 @@ func NewGenericMappedFileFinder(contentTyper core.ContentTyper, fs core.AferoCon // Objects without someone calling SetMapping() first. type GenericMappedFileFinder struct { // Default: DefaultContentTyper - contentTyper core.ContentTyper - fs core.AferoContext + contentTyper filesystem.ContentTyper + fs filesystem.AferoContext branch branch } -func (f *GenericMappedFileFinder) Filesystem() core.AferoContext { +func (f *GenericMappedFileFinder) Filesystem() filesystem.AferoContext { return f.fs } diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go new file mode 100644 index 00000000..0cb398f4 --- /dev/null +++ b/pkg/storage/filesystem/unstructured/interfaces.go @@ -0,0 +1,62 @@ +package unstructured + +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" +) + +// Storage is a raw Storage interface that builds on top +// of Storage. It uses an ObjectRecognizer to recognize +// otherwise unknown objects in unstructured files. +// The Storage must use a MappedFileFinder underneath. +// +// Multiple Objects in the same file, or multiple Objects with the +// same ID in multiple files are not supported. +type Storage interface { + filesystem.Storage + + // Sync synchronizes the current state of the filesystem with the + // cached mappings in the MappedFileFinder. + Sync(ctx context.Context) error + + // ObjectRecognizer returns the underlying ObjectRecognizer used. 
+ ObjectRecognizer() core.ObjectRecognizer + // PathExcluder specifies what paths to not sync + // TODO: enable this + // PathExcluder() core.PathExcluder + // MappedFileFinder returns the underlying MappedFileFinder used. + MappedFileFinder() MappedFileFinder +} + +// MappedFileFinder is an extension to FileFinder that allows it to have an internal +// cache with mappings between UnversionedObjectID and a ChecksumPath. This allows +// higher-order interfaces to manage Objects in files in an unorganized directory +// (e.g. a Git repo). +// +// Multiple Objects in the same file, or multiple Objects with the +// same ID in multiple files are not supported. +type MappedFileFinder interface { + filesystem.FileFinder + + // GetMapping retrieves a mapping in the system. + GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) + // SetMapping binds an ID to a physical file path. This operation overwrites + // any previous mapping for id. + SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) + // ResetMappings replaces all mappings at once to the ones in m. + ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) + // DeleteMapping removes the mapping for the given id. + DeleteMapping(ctx context.Context, id core.UnversionedObjectID) +} + +// ChecksumPath is a tuple of a given Checksum and relative file Path, +// for use in MappedFileFinder. +type ChecksumPath struct { + // TODO: Implement ChecksumContainer, or make ChecksumPath a + // sub-interface of ObjectID? + Checksum string + // Note: path is relative to the AferoContext. 
+ Path string +} diff --git a/pkg/storage/raw/mapped_cache.go b/pkg/storage/filesystem/unstructured/mapped_cache.go similarity index 98% rename from pkg/storage/raw/mapped_cache.go rename to pkg/storage/filesystem/unstructured/mapped_cache.go index 28455f18..08aeb835 100644 --- a/pkg/storage/raw/mapped_cache.go +++ b/pkg/storage/filesystem/unstructured/mapped_cache.go @@ -1,4 +1,4 @@ -package raw +package unstructured import "github.com/weaveworks/libgitops/pkg/storage/core" diff --git a/pkg/storage/filesystem/watch/events.go b/pkg/storage/filesystem/watch/events.go new file mode 100644 index 00000000..4db6d63a --- /dev/null +++ b/pkg/storage/filesystem/watch/events.go @@ -0,0 +1,36 @@ +package watch + +// FileEventType is an enum describing a change in a file's state +type FileEventType byte + +const ( + FileEventNone FileEventType = iota // 0 + FileEventModify // 1 + FileEventDelete // 2 + FileEventMove // 3 +) + +func (e FileEventType) String() string { + switch e { + case 0: + return "NONE" + case 1: + return "MODIFY" + case 2: + return "DELETE" + case 3: + return "MOVE" + } + + return "UNKNOWN" +} + +// FileEvent describes a file change of a certain kind at a certain +// (relative) path. Often emitted by FileEventsEmitter. 
+type FileEvent struct { + Path string + Type FileEventType +} + +// FileEventStream is a channel of FileEvents +type FileEventStream chan *FileEvent diff --git a/pkg/storage/raw/watch/inotify/filewatcher.go b/pkg/storage/filesystem/watch/inotify/filewatcher.go similarity index 99% rename from pkg/storage/raw/watch/inotify/filewatcher.go rename to pkg/storage/filesystem/watch/inotify/filewatcher.go index 4524c8d2..cf7efa08 100644 --- a/pkg/storage/raw/watch/inotify/filewatcher.go +++ b/pkg/storage/filesystem/watch/inotify/filewatcher.go @@ -12,7 +12,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/spf13/afero" "github.com/weaveworks/libgitops/pkg/storage/core" - "github.com/weaveworks/libgitops/pkg/storage/raw/watch" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/watch" "github.com/weaveworks/libgitops/pkg/util/sync" "golang.org/x/sys/unix" "k8s.io/apimachinery/pkg/util/sets" diff --git a/pkg/storage/raw/watch/inotify/filewatcher_test.go b/pkg/storage/filesystem/watch/inotify/filewatcher_test.go similarity index 97% rename from pkg/storage/raw/watch/inotify/filewatcher_test.go rename to pkg/storage/filesystem/watch/inotify/filewatcher_test.go index 620c139d..cc1fa7d6 100644 --- a/pkg/storage/raw/watch/inotify/filewatcher_test.go +++ b/pkg/storage/filesystem/watch/inotify/filewatcher_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/rjeczalik/notify" - "github.com/weaveworks/libgitops/pkg/storage/raw/watch" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/watch" "golang.org/x/sys/unix" ) diff --git a/pkg/storage/raw/watch/inotify/options.go b/pkg/storage/filesystem/watch/inotify/options.go similarity index 100% rename from pkg/storage/raw/watch/inotify/options.go rename to pkg/storage/filesystem/watch/inotify/options.go diff --git a/pkg/storage/raw/watch/interfaces.go b/pkg/storage/filesystem/watch/interfaces.go similarity index 63% rename from pkg/storage/raw/watch/interfaces.go rename to pkg/storage/filesystem/watch/interfaces.go 
index 67bc335a..5e502c62 100644 --- a/pkg/storage/raw/watch/interfaces.go +++ b/pkg/storage/filesystem/watch/interfaces.go @@ -5,8 +5,9 @@ import ( "errors" "io" - "github.com/weaveworks/libgitops/pkg/storage/core" - "github.com/weaveworks/libgitops/pkg/storage/raw" + "github.com/weaveworks/libgitops/pkg/storage" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured" ) var ( @@ -32,51 +33,30 @@ type FileEventsEmitter interface { Suspend(ctx context.Context, path string) // PathExcluder returns the PathExcluder used internally - PathExcluder() core.PathExcluder + PathExcluder() filesystem.PathExcluder // ContentTyper returns the ContentTyper used internally - ContentTyper() core.ContentTyper + ContentTyper() filesystem.ContentTyper // Filesystem returns the filesystem abstraction used internally - Filesystem() core.AferoContext + Filesystem() filesystem.AferoContext // Close closes the emitter gracefully. io.Closer } -// EventStorageCommon contains the methods that EventStorage adds to the -// to the normal raw.Storage. -type EventStorageCommon interface { - // WatchForObjectEvents starts feeding ObjectEvents into the given "into" - // channel. The caller is responsible for setting a channel buffering - // limit large enough to not block normal operation. An error might - // be returned if a maximum amount of watches has been opened already, - // e.g. ErrTooManyWatches. - WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error - - // Close closes the EventStorage and underlying resources gracefully. - io.Closer -} - // FileEventStorageCommon is an extension to EventStorageCommon that // also contains an underlying FileEventsEmitter. This is meant to be -// used in tandem with raw.FilesystemStorages. +// used in tandem with filesystem.Storages. 
type FileEventStorageCommon interface { - EventStorageCommon + storage.EventStorageCommon // FileEventsEmitter gets the FileEventsEmitter used internally. FileEventsEmitter() FileEventsEmitter } -// EventStorage is the abstract combination of a normal raw.Storage, and -// a possiblility to listen for changes to objects as they change. -type EventStorage interface { - raw.Storage - EventStorageCommon -} - -// FilesystemEventStorage is the combination of a raw.FilesystemStorage, +// FilesystemEventStorage is the combination of a filesystem.Storage, // and the possibility to listen for object updates from a FileEventsEmitter. type FilesystemEventStorage interface { - raw.FilesystemStorage + filesystem.Storage FileEventStorageCommon } @@ -86,6 +66,6 @@ type FilesystemEventStorage interface { // When the Sync() function is run; the ObjectEvents that are emitted to the // listening channels with have ObjectEvent.Type == ObjectEventSync. type UnstructuredEventStorage interface { - raw.UnstructuredStorage + unstructured.UnstructuredStorage FileEventStorageCommon } diff --git a/pkg/storage/raw/watch/manifest/manifest.go b/pkg/storage/filesystem/watch/manifest/manifest.go similarity index 79% rename from pkg/storage/raw/watch/manifest/manifest.go rename to pkg/storage/filesystem/watch/manifest/manifest.go index 793e453c..478dc7df 100644 --- a/pkg/storage/raw/watch/manifest/manifest.go +++ b/pkg/storage/filesystem/watch/manifest/manifest.go @@ -2,13 +2,12 @@ package manifest import ( "github.com/weaveworks/libgitops/pkg/storage/core" - "github.com/weaveworks/libgitops/pkg/storage/raw" - "github.com/weaveworks/libgitops/pkg/storage/raw/watch" - "github.com/weaveworks/libgitops/pkg/storage/raw/watch/inotify" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/watch" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/watch/inotify" ) // NewManifestStorage is a high-level constructor for a generic -// MappedFileFinder and FilesystemStorage, together with a +// 
MappedFileFinder and filesystem.Storage, together with a // inotify FileWatcher; all combined into an UnstructuredEventStorage. func NewManifestStorage( dir string, diff --git a/pkg/storage/raw/watch/watch.go b/pkg/storage/filesystem/watch/watch.go similarity index 98% rename from pkg/storage/raw/watch/watch.go rename to pkg/storage/filesystem/watch/watch.go index 0c307919..23deafe2 100644 --- a/pkg/storage/raw/watch/watch.go +++ b/pkg/storage/filesystem/watch/watch.go @@ -8,7 +8,6 @@ import ( "github.com/sirupsen/logrus" "github.com/weaveworks/libgitops/pkg/storage/core" - "github.com/weaveworks/libgitops/pkg/storage/raw" "github.com/weaveworks/libgitops/pkg/util/sync" ) @@ -16,7 +15,7 @@ const defaultEventsBufferSize = 4096 // NewGenericUnstructuredEventStorage is an extended Storage implementation, which // together with the provided ObjectRecognizer and FileEventsEmitter listens for -// file events, keeps the mappings of the FilesystemStorage's MappedFileFinder +// file events, keeps the mappings of the filesystem.Storage's MappedFileFinder // in sync (s must use the mapped variant), and sends high-level ObjectEvents // upstream. // @@ -31,7 +30,7 @@ func NewGenericUnstructuredEventStorage( // TODO: Possibly relax this requirement later, maybe it can also work for the SimpleFileFinder? 
fileFinder, ok := s.FileFinder().(raw.MappedFileFinder) if !ok { - return nil, errors.New("the given FilesystemStorage must use a MappedFileFinder") + return nil, errors.New("the given filesystem.Storage must use a MappedFileFinder") } return &GenericUnstructuredEventStorage{ diff --git a/pkg/storage/raw/interfaces.go b/pkg/storage/interfaces.go similarity index 57% rename from pkg/storage/raw/interfaces.go rename to pkg/storage/interfaces.go index f49847d4..73520c65 100644 --- a/pkg/storage/raw/interfaces.go +++ b/pkg/storage/interfaces.go @@ -1,8 +1,9 @@ -package raw +package storage import ( "context" "errors" + "io" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" @@ -49,7 +50,7 @@ type Reader interface { Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) // Stat returns information about the object, e.g. checksum, // content type, and possibly, path on disk (in the case of - // FilesystemStorage), or core.NewErrNotFound if not found + // filesystem.Storage), or core.NewErrNotFound if not found Stat(ctx context.Context, id core.UnversionedObjectID) (ObjectInfo, error) // Resolve ContentType ContentTypeResolver @@ -90,7 +91,7 @@ type Lister interface { // ObjectInfo is the return value from Storage.Stat(). It provides the // user with information about the given Object, e.g. its ContentType, // a checksum, and its relative path on disk, if the Storage is a -// FilesystemStorage. +// filesystem.Storage. type ObjectInfo interface { // ContentTyped returns the ContentType of the Object when stored. serializer.ContentTyped @@ -132,94 +133,25 @@ type Writer interface { Delete(ctx context.Context, id core.UnversionedObjectID) error } -// FilesystemStorage extends Storage by specializing it to operate in a -// filesystem context, and in other words use a FileFinder to locate the -// files to operate on. 
-type FilesystemStorage interface { - Storage - - // FileFinder returns the underlying FileFinder used. - // TODO: Maybe one Storage can have multiple FileFinders? - FileFinder() FileFinder -} - -// FileFinder is a generic implementation for locating files on disk, to be -// used by a FilesystemStorage. -// -// Important: The caller MUST guarantee that the implementation can figure -// out if the GroupKind is namespaced or not by the following check: -// -// namespaced := id.ObjectKey().Namespace != "" -// -// In other words, the caller must enforce a namespace being set for namespaced -// kinds, and namespace not being set for non-namespaced kinds. -type FileFinder interface { - // Filesystem gets the underlying filesystem abstraction, if - // applicable. - Filesystem() core.AferoContext - - // ObjectPath gets the file path relative to the root directory. - // In order to support a create operation, this function must also return a valid path for - // files that do not yet exist on disk. - ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) - // ObjectAt retrieves the ID based on the given relative file path to fs. - ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) - // The FileFinder should be able to resolve the content type for various IDs - ContentTypeResolver - // The FileFinder should be able to list namespaces and Object IDs - Lister -} - -// MappedFileFinder is an extension to FileFinder that allows it to have an internal -// cache with mappings between UnversionedObjectID and a ChecksumPath. This allows -// higher-order interfaces to manage Objects in files in an unorganized directory -// (e.g. a Git repo). -// -// Multiple Objects in the same file, or multiple Objects with the -// same ID in multiple files are not supported. -type MappedFileFinder interface { - FileFinder - - // GetMapping retrieves a mapping in the system. 
- GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) - // SetMapping binds an ID to a physical file path. This operation overwrites - // any previous mapping for id. - SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) - // ResetMappings replaces all mappings at once to the ones in m. - ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) - // DeleteMapping removes the mapping for the given id. - DeleteMapping(ctx context.Context, id core.UnversionedObjectID) +// EventStorageCommon contains the methods that EventStorage adds to the +// to the normal Storage. +type EventStorageCommon interface { + // WatchForObjectEvents starts feeding ObjectEvents into the given "into" + // channel. The caller is responsible for setting a channel buffering + // limit large enough to not block normal operation. An error might + // be returned if a maximum amount of watches has been opened already, + // e.g. ErrTooManyWatches. + WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error + + // Close closes the EventStorage and underlying resources gracefully. + io.Closer } -// UnstructuredStorage is a raw Storage interface that builds on top -// of FilesystemStorage. It uses an ObjectRecognizer to recognize -// otherwise unknown objects in unstructured files. -// The FilesystemStorage must use a MappedFileFinder underneath. -// -// Multiple Objects in the same file, or multiple Objects with the -// same ID in multiple files are not supported. -type UnstructuredStorage interface { - FilesystemStorage - - // Sync synchronizes the current state of the filesystem with the - // cached mappings in the MappedFileFinder. - Sync(ctx context.Context) error - - // ObjectRecognizer returns the underlying ObjectRecognizer used. 
- ObjectRecognizer() core.ObjectRecognizer - // PathExcluder specifies what paths to not sync - // TODO: enable this - // PathExcluder() core.PathExcluder - // MappedFileFinder returns the underlying MappedFileFinder used. - MappedFileFinder() MappedFileFinder -} - -// ChecksumPath is a tuple of a given Checksum and relative file Path, -// for use in MappedFileFinder. -type ChecksumPath struct { - // TODO: Implement ChecksumContainer, or make ChecksumPath a - // sub-interface of ObjectID? - Checksum string - // Note: path is relative to the AferoContext. - Path string +// EventStorage is the abstract combination of a normal Storage, and +// a possiblility to listen for changes to objects as they change. +// TODO: Maybe we could use some of controller-runtime's built-in functionality +// for watching for changes? +type EventStorage interface { + Storage + EventStorageCommon } diff --git a/pkg/storage/raw/objectinfo.go b/pkg/storage/objectinfo.go similarity index 70% rename from pkg/storage/raw/objectinfo.go rename to pkg/storage/objectinfo.go index 51936159..7ddc78cb 100644 --- a/pkg/storage/raw/objectinfo.go +++ b/pkg/storage/objectinfo.go @@ -1,10 +1,19 @@ -package raw +package storage import ( "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" ) +func NewObjectInfo(ct serializer.ContentType, checksum string, filepath string, id core.UnversionedObjectID) ObjectInfo { + return &objectInfo{ + ct: ct, + checksum: checksum, + filepath: filepath, + id: id, + } +} + var _ ObjectInfo = &objectInfo{} type objectInfo struct { From 7f9b4326756f8f2eb67a551ada3876f1e2c8954e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sat, 23 Jan 2021 22:37:53 +0200 Subject: [PATCH 050/149] Rename AferoContext to Filesystem, and add a checksum method that'll come in handy. 
--- pkg/storage/filesystem/afero.go | 96 ---------------------- pkg/storage/filesystem/filesystem.go | 115 +++++++++++++++++++++++++++ 2 files changed, 115 insertions(+), 96 deletions(-) delete mode 100644 pkg/storage/filesystem/afero.go create mode 100644 pkg/storage/filesystem/filesystem.go diff --git a/pkg/storage/filesystem/afero.go b/pkg/storage/filesystem/afero.go deleted file mode 100644 index 71b071c4..00000000 --- a/pkg/storage/filesystem/afero.go +++ /dev/null @@ -1,96 +0,0 @@ -package filesystem - -import ( - "context" - "os" - "path/filepath" - - "github.com/spf13/afero" -) - -// AferoContext extends afero.Fs and afero.Afero with contexts added to every method. -type AferoContext interface { - // RootDirectory specifies where on disk the root directory is stored. - // This path MUST be absolute. All other paths for the other methods - // MUST be relative to this directory. - RootDirectory() string - - // Members of afero.Fs - - // MkdirAll creates a directory path and all parents that does not exist - // yet. - MkdirAll(ctx context.Context, path string, perm os.FileMode) error - // Remove removes a file identified by name, returning an error, if any - // happens. - Remove(ctx context.Context, name string) error - // Stat returns a FileInfo describing the named file, or an error, if any - // happens. - Stat(ctx context.Context, name string) (os.FileInfo, error) - - // Members of afero.Afero - - ReadDir(ctx context.Context, dirname string) ([]os.FileInfo, error) - - Exists(ctx context.Context, path string) (bool, error) - - ReadFile(ctx context.Context, filename string) ([]byte, error) - - WriteFile(ctx context.Context, filename string, data []byte, perm os.FileMode) error - - Walk(ctx context.Context, root string, walkFn filepath.WalkFunc) error -} - -// AferoContextForLocalDir creates a new afero.OsFs for the local directory, wrapped -// in AferoContextWrapperForDir. 
-func AferoContextForLocalDir(rootDir string) AferoContext { - return AferoContextWrapperForDir(afero.NewOsFs(), rootDir) -} - -// AferoContextWrapperForDir wraps an underlying afero.Fs without context knowledge, -// in a AferoContext-compliant implementation; scoped at the given directory -// (i.e. wrapped in afero.NewBasePathFs(fs, rootDir)). -func AferoContextWrapperForDir(fs afero.Fs, rootDir string) AferoContext { - // TODO: rootDir validation? It must be absolute, exist, and be a directory. - return &aferoWithoutCtx{afero.NewBasePathFs(fs, rootDir), rootDir} -} - -type aferoWithoutCtx struct { - fs afero.Fs - rootDir string -} - -func (a *aferoWithoutCtx) RootDirectory() string { - return a.rootDir -} - -func (a *aferoWithoutCtx) MkdirAll(_ context.Context, path string, perm os.FileMode) error { - return a.fs.MkdirAll(path, perm) -} - -func (a *aferoWithoutCtx) Remove(_ context.Context, name string) error { - return a.fs.Remove(name) -} - -func (a *aferoWithoutCtx) Stat(_ context.Context, name string) (os.FileInfo, error) { - return a.fs.Stat(name) -} - -func (a *aferoWithoutCtx) ReadDir(_ context.Context, dirname string) ([]os.FileInfo, error) { - return afero.ReadDir(a.fs, dirname) -} - -func (a *aferoWithoutCtx) Exists(_ context.Context, path string) (bool, error) { - return afero.Exists(a.fs, path) -} - -func (a *aferoWithoutCtx) ReadFile(_ context.Context, filename string) ([]byte, error) { - return afero.ReadFile(a.fs, filename) -} - -func (a *aferoWithoutCtx) WriteFile(_ context.Context, filename string, data []byte, perm os.FileMode) error { - return afero.WriteFile(a.fs, filename, data, perm) -} - -func (a *aferoWithoutCtx) Walk(_ context.Context, root string, walkFn filepath.WalkFunc) error { - return afero.Walk(a.fs, root, walkFn) -} diff --git a/pkg/storage/filesystem/filesystem.go b/pkg/storage/filesystem/filesystem.go new file mode 100644 index 00000000..5897d86b --- /dev/null +++ b/pkg/storage/filesystem/filesystem.go @@ -0,0 +1,115 @@ +package 
filesystem + +import ( + "context" + "os" + "path/filepath" + "strconv" + + "github.com/spf13/afero" +) + +// Filesystem extends afero.Fs and afero.Afero with contexts added to every method. +type Filesystem interface { + + // Members of afero.Fs + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(ctx context.Context, path string, perm os.FileMode) error + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(ctx context.Context, name string) error + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(ctx context.Context, name string) (os.FileInfo, error) + + // Members of afero.Afero + + ReadDir(ctx context.Context, dirname string) ([]os.FileInfo, error) + + Exists(ctx context.Context, path string) (bool, error) + + ReadFile(ctx context.Context, filename string) ([]byte, error) + + WriteFile(ctx context.Context, filename string, data []byte, perm os.FileMode) error + + Walk(ctx context.Context, root string, walkFn filepath.WalkFunc) error + + // Custom methods + + Checksum(ctx context.Context, filename string) (string, error) + + // RootDirectory specifies where on disk the root directory is stored. + // This path MUST be absolute. All other paths for the other methods + // MUST be relative to this directory. + RootDirectory() string +} + +// NewOSFilesystem creates a new afero.OsFs for the local directory, wrapped +// in FilesystemWrapperForDir. +func NewOSFilesystem(rootDir string) Filesystem { + return NewFilesystem(afero.NewOsFs(), rootDir) +} + +// NewFilesystem wraps an underlying afero.Fs without context knowledge, +// in a Filesystem-compliant implementation; scoped at the given directory +// (i.e. wrapped in afero.NewBasePathFs(fs, rootDir)). +func NewFilesystem(fs afero.Fs, rootDir string) Filesystem { + // TODO: rootDir validation? It must be absolute, exist, and be a directory. 
+ return &filesystem{afero.NewBasePathFs(fs, rootDir), rootDir} +} + +type filesystem struct { + fs afero.Fs + rootDir string +} + +func (f *filesystem) RootDirectory() string { + return f.rootDir +} + +func (f *filesystem) Checksum(ctx context.Context, filename string) (string, error) { + fi, err := f.Stat(ctx, filename) + if err != nil { + return "", err + } + return checksumFromFileInfo(fi), nil +} + +func (f *filesystem) MkdirAll(_ context.Context, path string, perm os.FileMode) error { + return f.fs.MkdirAll(path, perm) +} + +func (f *filesystem) Remove(_ context.Context, name string) error { + return f.fs.Remove(name) +} + +func (f *filesystem) Stat(_ context.Context, name string) (os.FileInfo, error) { + return f.fs.Stat(name) +} + +func (f *filesystem) ReadDir(_ context.Context, dirname string) ([]os.FileInfo, error) { + return afero.ReadDir(f.fs, dirname) +} + +func (f *filesystem) Exists(_ context.Context, path string) (bool, error) { + return afero.Exists(f.fs, path) +} + +func (f *filesystem) ReadFile(_ context.Context, filename string) ([]byte, error) { + return afero.ReadFile(f.fs, filename) +} + +func (f *filesystem) WriteFile(_ context.Context, filename string, data []byte, perm os.FileMode) error { + return afero.WriteFile(f.fs, filename, data, perm) +} + +func (f *filesystem) Walk(_ context.Context, root string, walkFn filepath.WalkFunc) error { + return afero.Walk(f.fs, root, walkFn) +} + +// TODO: Move to the Filesystem abstraction +func checksumFromFileInfo(fi os.FileInfo) string { + return strconv.FormatInt(fi.ModTime().UnixNano(), 10) +} From 51e94ade08d16ebe8897ce569d7e6df88f4b0107 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sat, 23 Jan 2021 22:41:21 +0200 Subject: [PATCH 051/149] Fixup all imports and similar changes from the earlier two restructurings. Now filesystem.Storage uses the Filesystem's checksum method. 
--- pkg/storage/backend/backend.go | 11 ++-- pkg/storage/filesystem/dir_traversal.go | 6 +-- pkg/storage/filesystem/filefinder_simple.go | 10 ++-- pkg/storage/filesystem/filesystem.go | 8 ++- pkg/storage/filesystem/format.go | 6 +-- pkg/storage/filesystem/interfaces.go | 2 +- pkg/storage/filesystem/path_excluder.go | 4 +- pkg/storage/filesystem/storage.go | 49 ++++-------------- .../unstructured/filefinder_mapped.go | 6 +-- .../filesystem/unstructured/interfaces.go | 2 +- .../filesystem/watch/inotify/filewatcher.go | 17 +++---- .../filesystem/watch/inotify/options.go | 14 ++--- pkg/storage/filesystem/watch/interfaces.go | 4 +- .../filesystem/watch/manifest/manifest.go | 12 +++-- pkg/storage/filesystem/watch/watch.go | 51 ++++++++++--------- 15 files changed, 90 insertions(+), 112 deletions(-) diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 29d49a99..442698d5 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -7,6 +7,7 @@ import ( "fmt" "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -32,7 +33,7 @@ var ( // TODO: Need to make sure we never write this internal struct to disk (MarshalJSON error?) type Accessors interface { - Storage() raw.Storage + Storage() storage.Storage NamespaceEnforcer() core.NamespaceEnforcer Scheme() *runtime.Scheme } @@ -46,7 +47,7 @@ type Reader interface { Accessors Get(ctx context.Context, obj core.Object) error - raw.Lister + storage.Lister } type Writer interface { @@ -89,7 +90,7 @@ type StorageVersioner interface { } func NewGeneric( - storage raw.Storage, + storage storage.Storage, serializer serializer.Serializer, // TODO: only scheme required, encode/decode optional? enforcer core.NamespaceEnforcer, validator Validator, // TODO: optional? 
@@ -124,7 +125,7 @@ type Generic struct { decoder serializer.Decoder encoder serializer.Encoder - storage raw.Storage + storage storage.Storage enforcer core.NamespaceEnforcer validator Validator versioner StorageVersioner @@ -134,7 +135,7 @@ func (b *Generic) Scheme() *runtime.Scheme { return b.scheme } -func (b *Generic) Storage() raw.Storage { +func (b *Generic) Storage() storage.Storage { return b.storage } diff --git a/pkg/storage/filesystem/dir_traversal.go b/pkg/storage/filesystem/dir_traversal.go index 497c4d60..6658292a 100644 --- a/pkg/storage/filesystem/dir_traversal.go +++ b/pkg/storage/filesystem/dir_traversal.go @@ -5,10 +5,10 @@ import ( "os" ) -// ListValidFilesInFilesystem discovers files in the given AferoContext that has a +// ListValidFilesInFilesystem discovers files in the given Filesystem that has a // ContentType that contentTyper recognizes, and is not a path that is excluded by // pathExcluder. -func ListValidFilesInFilesystem(ctx context.Context, fs AferoContext, contentTyper ContentTyper, pathExcluder PathExcluder) (files []string, err error) { +func ListValidFilesInFilesystem(ctx context.Context, fs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder) (files []string, err error) { err = fs.Walk(ctx, "", func(path string, info os.FileInfo, err error) error { if err != nil { return err @@ -25,7 +25,7 @@ func ListValidFilesInFilesystem(ctx context.Context, fs AferoContext, contentTyp // IsValidFileInFilesystem checks if file (a relative path) has a ContentType // that contentTyper recognizes, and is not a path that is excluded by pathExcluder. 
-func IsValidFileInFilesystem(ctx context.Context, fs AferoContext, contentTyper ContentTyper, pathExcluder PathExcluder, file string) bool { +func IsValidFileInFilesystem(ctx context.Context, fs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder, file string) bool { // return false if this path should be excluded if pathExcluder.ShouldExcludePath(ctx, fs, file) { return false diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go index d0b990e1..539f2578 100644 --- a/pkg/storage/filesystem/filefinder_simple.go +++ b/pkg/storage/filesystem/filefinder_simple.go @@ -18,7 +18,7 @@ import ( // If you need more advanced customizablility than provided here, you can compose // the call to NewGenericStorage yourself. func NewSimpleStorage(dir string, namespacer core.Namespacer, opts SimpleFileFinderOptions) (Storage, error) { - fs := AferoContextForLocalDir(dir) + fs := NewOSFilesystem(dir) fileFinder, err := NewSimpleFileFinder(fs, opts) if err != nil { return nil, err @@ -27,7 +27,7 @@ func NewSimpleStorage(dir string, namespacer core.Namespacer, opts SimpleFileFin return NewGeneric(fileFinder, namespacer) } -func NewSimpleFileFinder(fs AferoContext, opts SimpleFileFinderOptions) (*SimpleFileFinder, error) { +func NewSimpleFileFinder(fs Filesystem, opts SimpleFileFinderOptions) (*SimpleFileFinder, error) { if fs == nil { return nil, fmt.Errorf("NewSimpleFileFinder: fs is mandatory") } @@ -68,7 +68,7 @@ var _ FileFinder = &SimpleFileFinder{} // // This FileFinder does not support the ObjectAt method. type SimpleFileFinder struct { - fs AferoContext + fs Filesystem opts SimpleFileFinderOptions } @@ -85,7 +85,7 @@ type SimpleFileFinderOptions struct { // TODO: Use group name "core" if group is "" to support core k8s objects. 
-func (f *SimpleFileFinder) Filesystem() AferoContext { +func (f *SimpleFileFinder) Filesystem() Filesystem { return f.fs } @@ -214,7 +214,7 @@ func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, return ids, nil } -func readDir(ctx context.Context, fs AferoContext, dir string) ([]string, error) { +func readDir(ctx context.Context, fs Filesystem, dir string) ([]string, error) { fi, err := fs.Stat(ctx, dir) if os.IsNotExist(err) { // It's ok if the directory doesn't exist (yet), we just don't have any items then :) diff --git a/pkg/storage/filesystem/filesystem.go b/pkg/storage/filesystem/filesystem.go index 5897d86b..2a230671 100644 --- a/pkg/storage/filesystem/filesystem.go +++ b/pkg/storage/filesystem/filesystem.go @@ -46,8 +46,8 @@ type Filesystem interface { RootDirectory() string } -// NewOSFilesystem creates a new afero.OsFs for the local directory, wrapped -// in FilesystemWrapperForDir. +// NewOSFilesystem creates a new afero.OsFs for the local directory, using +// NewFilesystem underneath. func NewOSFilesystem(rootDir string) Filesystem { return NewFilesystem(afero.NewOsFs(), rootDir) } @@ -55,6 +55,10 @@ func NewOSFilesystem(rootDir string) Filesystem { // NewFilesystem wraps an underlying afero.Fs without context knowledge, // in a Filesystem-compliant implementation; scoped at the given directory // (i.e. wrapped in afero.NewBasePathFs(fs, rootDir)). +// +// Checksum is calculated based on the modification timestamp of the file, or +// alternatively, from info.Sys() returned from Filesystem.Stat(), if it can +// be cast to a ChecksumContainer. func NewFilesystem(fs afero.Fs, rootDir string) Filesystem { // TODO: rootDir validation? It must be absolute, exist, and be a directory. 
return &filesystem{afero.NewBasePathFs(fs, rootDir), rootDir} diff --git a/pkg/storage/filesystem/format.go b/pkg/storage/filesystem/format.go index f5fd4a30..c4a33ce2 100644 --- a/pkg/storage/filesystem/format.go +++ b/pkg/storage/filesystem/format.go @@ -19,9 +19,9 @@ var ( // for making the judgement. See DefaultContentTyper for a sample implementation. type ContentTyper interface { // ContentTypeForPath should return the content type for the file that exists in - // the given AferoContext (path is relative). If the content type cannot be determined + // the given Filesystem (path is relative). If the content type cannot be determined // please return a wrapped ErrCannotDetermineContentType error. - ContentTypeForPath(ctx context.Context, fs AferoContext, path string) (serializer.ContentType, error) + ContentTypeForPath(ctx context.Context, fs Filesystem, path string) (serializer.ContentType, error) } // DefaultContentTypes describes the default connection between @@ -41,7 +41,7 @@ var DefaultContentTyper ContentTyper = ContentTypeForExtension{ // and ".yml" -> ContentTypeYAML. type ContentTypeForExtension map[string]serializer.ContentType -func (m ContentTypeForExtension) ContentTypeForPath(ctx context.Context, _ AferoContext, path string) (serializer.ContentType, error) { +func (m ContentTypeForExtension) ContentTypeForPath(ctx context.Context, _ Filesystem, path string) (serializer.ContentType, error) { ct, ok := m[filepath.Ext(path)] if !ok { return serializer.ContentType(""), fmt.Errorf("%w for file %q", ErrCannotDetermineContentType, path) diff --git a/pkg/storage/filesystem/interfaces.go b/pkg/storage/filesystem/interfaces.go index 286a0d64..0c691034 100644 --- a/pkg/storage/filesystem/interfaces.go +++ b/pkg/storage/filesystem/interfaces.go @@ -31,7 +31,7 @@ type Storage interface { type FileFinder interface { // Filesystem gets the underlying filesystem abstraction, if // applicable. 
- Filesystem() AferoContext + Filesystem() Filesystem // ObjectPath gets the file path relative to the root directory. // In order to support a create operation, this function must also return a valid path for diff --git a/pkg/storage/filesystem/path_excluder.go b/pkg/storage/filesystem/path_excluder.go index 48a3df1e..9c4f3c03 100644 --- a/pkg/storage/filesystem/path_excluder.go +++ b/pkg/storage/filesystem/path_excluder.go @@ -13,7 +13,7 @@ type PathExcluder interface { // ShouldExcludePath takes in a context, the fs filesystem abstraction, // and a relative path to the file which should be determined if it should // be excluded or not. - ShouldExcludePath(ctx context.Context, fs AferoContext, path string) bool + ShouldExcludePath(ctx context.Context, fs Filesystem, path string) bool } // ExcludeGitDirectory implements PathExcluder. @@ -23,7 +23,7 @@ var _ PathExcluder = ExcludeGitDirectory{} // all files under a ".git" directory, anywhere in the tree under the root directory. type ExcludeGitDirectory struct{} -func (ExcludeGitDirectory) ShouldExcludePath(_ context.Context, _ AferoContext, path string) bool { +func (ExcludeGitDirectory) ShouldExcludePath(_ context.Context, _ Filesystem, path string) bool { // Always start from a clean path path = filepath.Clean(path) for { diff --git a/pkg/storage/filesystem/storage.go b/pkg/storage/filesystem/storage.go index a5e4e400..0966310e 100644 --- a/pkg/storage/filesystem/storage.go +++ b/pkg/storage/filesystem/storage.go @@ -3,9 +3,7 @@ package filesystem import ( "context" "fmt" - "os" "path/filepath" - "strconv" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage" @@ -30,12 +28,8 @@ func NewGeneric(fileFinder FileFinder, namespacer core.Namespacer) (Storage, err } // Generic is a Storage-compliant implementation, that -// combines the given lower-level FileFinder, Namespacer and AferoContext interfaces +// combines the given lower-level FileFinder, Namespacer and Filesystem 
interfaces // in a generic manner. -// -// Checksum is calculated based on the modification timestamp of the file, or -// alternatively, from info.Sys() returned from AferoContext.Stat(), if it can -// be cast to a ChecksumContainer. type Generic struct { fileFinder FileFinder namespacer core.Namespacer @@ -84,19 +78,15 @@ func (r *Generic) Stat(ctx context.Context, id core.UnversionedObjectID) (storag return nil, err } - // Stat the file - info, err := r.FileFinder().Filesystem().Stat(ctx, p) - if os.IsNotExist(err) { + // Make sure the file exists + if !r.exists(ctx, p) { return nil, core.NewErrNotFound(id) - } else if err != nil { - return nil, err } - // Get checksum - checksum := checksumFromFileInfo(info) - // Allow a custom implementation of afero return ObjectInfo directly - if chk, ok := info.Sys().(storage.ChecksumContainer); ok { - checksum = chk.Checksum() + // Get the checksum + checksum, err := r.FileFinder().Filesystem().Checksum(ctx, p) + if err != nil { + return nil, err } // Get content type @@ -173,7 +163,7 @@ func (r *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.S // must only return object IDs for that given namespace. 
func (r *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { // Validate the namespace parameter - if err := VerifyNamespaced(r.Namespacer(), gk, namespace); err != nil { + if err := storage.VerifyNamespaced(r.Namespacer(), gk, namespace); err != nil { return nil, err } // Just use the underlying filefinder @@ -190,26 +180,5 @@ func (r *Generic) getPath(ctx context.Context, id core.UnversionedObjectID) (str } func (r *Generic) verifyID(id core.UnversionedObjectID) error { - return VerifyNamespaced(r.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace) -} - -// TODO: Move to the Filesystem abstraction -func checksumFromFileInfo(fi os.FileInfo) string { - return strconv.FormatInt(fi.ModTime().UnixNano(), 10) -} - -// VerifyNamespaced verifies that the given GroupKind and namespace parameter follows -// the rule of the Namespacer. -func VerifyNamespaced(namespacer core.Namespacer, gk core.GroupKind, ns string) error { - // Get namespacing info - namespaced, err := namespacer.IsNamespaced(gk) - if err != nil { - return err - } - if namespaced && ns == "" { - return fmt.Errorf("%w: namespaced kind %v requires non-empty namespace", storage.ErrNamespacedMismatch, gk) - } else if !namespaced && ns != "" { - return fmt.Errorf("%w: non-namespaced kind %v must not have namespace parameter set", storage.ErrNamespacedMismatch, gk) - } - return nil + return storage.VerifyNamespaced(r.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace) } diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go index 7c1a7430..6684658a 100644 --- a/pkg/storage/filesystem/unstructured/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -22,7 +22,7 @@ var _ MappedFileFinder = &GenericMappedFileFinder{} // NewGenericMappedFileFinder creates a new instance of GenericMappedFileFinder, // that implements the MappedFileFinder 
interface. The contentTyper is optional, // by default core.DefaultContentTyper will be used. -func NewGenericMappedFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.AferoContext) MappedFileFinder { +func NewGenericMappedFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.Filesystem) MappedFileFinder { if contentTyper == nil { contentTyper = filesystem.DefaultContentTyper } @@ -49,12 +49,12 @@ func NewGenericMappedFileFinder(contentTyper filesystem.ContentTyper, fs filesys type GenericMappedFileFinder struct { // Default: DefaultContentTyper contentTyper filesystem.ContentTyper - fs filesystem.AferoContext + fs filesystem.Filesystem branch branch } -func (f *GenericMappedFileFinder) Filesystem() filesystem.AferoContext { +func (f *GenericMappedFileFinder) Filesystem() filesystem.Filesystem { return f.fs } diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go index 0cb398f4..9f8227b7 100644 --- a/pkg/storage/filesystem/unstructured/interfaces.go +++ b/pkg/storage/filesystem/unstructured/interfaces.go @@ -57,6 +57,6 @@ type ChecksumPath struct { // TODO: Implement ChecksumContainer, or make ChecksumPath a // sub-interface of ObjectID? Checksum string - // Note: path is relative to the AferoContext. + // Note: path is relative to filesystem.Filesystem.RootDirectory(). 
Path string } diff --git a/pkg/storage/filesystem/watch/inotify/filewatcher.go b/pkg/storage/filesystem/watch/inotify/filewatcher.go index cf7efa08..f79a9d9b 100644 --- a/pkg/storage/filesystem/watch/inotify/filewatcher.go +++ b/pkg/storage/filesystem/watch/inotify/filewatcher.go @@ -10,8 +10,7 @@ import ( "github.com/rjeczalik/notify" "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" - "github.com/spf13/afero" - "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" "github.com/weaveworks/libgitops/pkg/storage/filesystem/watch" "github.com/weaveworks/libgitops/pkg/util/sync" "golang.org/x/sys/unix" @@ -61,7 +60,7 @@ func NewFileWatcher(dir string, opts ...FileWatcherOption) (watch.FileEventsEmit opts: *o, // afero operates on the local disk, but is by convention scoped to the local // directory that is being watched - afero: core.AferoContextWrapperForDir(afero.NewOsFs(), dir), + fs: filesystem.NewOSFilesystem(dir), batcher: sync.NewBatchWriter(o.BatchTimeout), } @@ -97,23 +96,23 @@ type FileWatcher struct { // afero is always the OsFs type, which means it is passing the calls through // directly to the local disk. It is used when talking to the given ContentTyper // in order to identify various content types. - afero core.AferoContext + fs filesystem.Filesystem // the batcher is used for properly sending many concurrent inotify events // as a group, after a specified timeout. 
This fixes the issue of one single // file operation being registered as many different inotify events batcher *sync.BatchWriter } -func (w *FileWatcher) ContentTyper() core.ContentTyper { +func (w *FileWatcher) ContentTyper() filesystem.ContentTyper { return w.opts.ContentTyper } -func (w *FileWatcher) PathExcluder() core.PathExcluder { +func (w *FileWatcher) PathExcluder() filesystem.PathExcluder { return w.opts.PathExcluder } -func (w *FileWatcher) Filesystem() core.AferoContext { - return w.afero +func (w *FileWatcher) Filesystem() filesystem.Filesystem { + return w.fs } func (w *FileWatcher) WatchForFileEvents(ctx context.Context, into watch.FileEventStream) error { @@ -133,7 +132,7 @@ func (w *FileWatcher) WatchForFileEvents(ctx context.Context, into watch.FileEve func (w *FileWatcher) validFile(path string) bool { ctx := context.Background() - return core.IsValidFileInFilesystem(ctx, w.afero, w.opts.ContentTyper, w.opts.PathExcluder, path) + return filesystem.IsValidFileInFilesystem(ctx, w.fs, w.opts.ContentTyper, w.opts.PathExcluder, path) } func (w *FileWatcher) monitorFunc() { diff --git a/pkg/storage/filesystem/watch/inotify/options.go b/pkg/storage/filesystem/watch/inotify/options.go index 796c7bd1..bb816a5b 100644 --- a/pkg/storage/filesystem/watch/inotify/options.go +++ b/pkg/storage/filesystem/watch/inotify/options.go @@ -3,7 +3,7 @@ package inotify import ( "time" - "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) // How many inotify events we can buffer before watching is interrupted @@ -18,8 +18,8 @@ var _ FileWatcherOption = &FileWatcherOptions{} // Options specifies options for the FileWatcher type FileWatcherOptions struct { // PathExcluder specifies what files and directories to ignore - // Default: ExcludeGitDirectory{} - PathExcluder core.PathExcluder + // Default: filesystem.ExcludeGitDirectory{} + PathExcluder filesystem.PathExcluder // BatchTimeout specifies the duration to 
wait after last event // before dispatching grouped inotify events // Default: 1s @@ -27,8 +27,8 @@ type FileWatcherOptions struct { // ContentTyper specifies what content types to recognize. // All files for which ContentTyper returns a nil error will // be watched. - // Default: core.DefaultContentTyper - ContentTyper core.ContentTyper + // Default: filesystem.DefaultContentTyper + ContentTyper filesystem.ContentTyper // EventBufferSize describes how many inotify events can be buffered // before watching is interrupted/delayed. // Default: DefaultEventBufferSize @@ -60,9 +60,9 @@ func (o *FileWatcherOptions) ApplyOptions(opts []FileWatcherOption) *FileWatcher // defaultOptions returns the default options func defaultOptions() *FileWatcherOptions { return &FileWatcherOptions{ - PathExcluder: core.ExcludeGitDirectory{}, + PathExcluder: filesystem.ExcludeGitDirectory{}, BatchTimeout: 1 * time.Second, - ContentTyper: core.DefaultContentTyper, + ContentTyper: filesystem.DefaultContentTyper, EventBufferSize: DefaultEventBufferSize, } } diff --git a/pkg/storage/filesystem/watch/interfaces.go b/pkg/storage/filesystem/watch/interfaces.go index 5e502c62..d51d8a1b 100644 --- a/pkg/storage/filesystem/watch/interfaces.go +++ b/pkg/storage/filesystem/watch/interfaces.go @@ -37,7 +37,7 @@ type FileEventsEmitter interface { // ContentTyper returns the ContentTyper used internally ContentTyper() filesystem.ContentTyper // Filesystem returns the filesystem abstraction used internally - Filesystem() filesystem.AferoContext + Filesystem() filesystem.Filesystem // Close closes the emitter gracefully. io.Closer @@ -66,6 +66,6 @@ type FilesystemEventStorage interface { // When the Sync() function is run; the ObjectEvents that are emitted to the // listening channels with have ObjectEvent.Type == ObjectEventSync. 
type UnstructuredEventStorage interface { - unstructured.UnstructuredStorage + unstructured.Storage FileEventStorageCommon } diff --git a/pkg/storage/filesystem/watch/manifest/manifest.go b/pkg/storage/filesystem/watch/manifest/manifest.go index 478dc7df..9d0786a6 100644 --- a/pkg/storage/filesystem/watch/manifest/manifest.go +++ b/pkg/storage/filesystem/watch/manifest/manifest.go @@ -2,6 +2,8 @@ package manifest import ( "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured" "github.com/weaveworks/libgitops/pkg/storage/filesystem/watch" "github.com/weaveworks/libgitops/pkg/storage/filesystem/watch/inotify" ) @@ -11,14 +13,14 @@ import ( // inotify FileWatcher; all combined into an UnstructuredEventStorage. func NewManifestStorage( dir string, - contentTyper core.ContentTyper, + contentTyper filesystem.ContentTyper, namespacer core.Namespacer, recognizer core.ObjectRecognizer, - pathExcluder core.PathExcluder, + pathExcluder filesystem.PathExcluder, ) (watch.UnstructuredEventStorage, error) { - fs := core.AferoContextForLocalDir(dir) - fileFinder := raw.NewGenericMappedFileFinder(contentTyper, fs) - fsRaw, err := raw.NewGenericFilesystemStorage(fileFinder, namespacer) + fs := filesystem.NewOSFilesystem(dir) + fileFinder := unstructured.NewGenericMappedFileFinder(contentTyper, fs) + fsRaw, err := filesystem.NewGeneric(fileFinder, namespacer) if err != nil { return nil, err } diff --git a/pkg/storage/filesystem/watch/watch.go b/pkg/storage/filesystem/watch/watch.go index 23deafe2..3e06ccc2 100644 --- a/pkg/storage/filesystem/watch/watch.go +++ b/pkg/storage/filesystem/watch/watch.go @@ -7,7 +7,10 @@ import ( gosync "sync" "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" + 
"github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured" "github.com/weaveworks/libgitops/pkg/util/sync" ) @@ -22,22 +25,22 @@ const defaultEventsBufferSize = 4096 // Note: This WatchStorage only works for one-frame files (i.e. only one YAML document // per file is supported). func NewGenericUnstructuredEventStorage( - s raw.FilesystemStorage, + s filesystem.Storage, recognizer core.ObjectRecognizer, emitter FileEventsEmitter, syncInBeginning bool, ) (UnstructuredEventStorage, error) { // TODO: Possibly relax this requirement later, maybe it can also work for the SimpleFileFinder? - fileFinder, ok := s.FileFinder().(raw.MappedFileFinder) + fileFinder, ok := s.FileFinder().(unstructured.MappedFileFinder) if !ok { return nil, errors.New("the given filesystem.Storage must use a MappedFileFinder") } return &GenericUnstructuredEventStorage{ - FilesystemStorage: s, - recognizer: recognizer, - fileFinder: fileFinder, - emitter: emitter, + Storage: s, + recognizer: recognizer, + fileFinder: fileFinder, + emitter: emitter, inbound: make(FileEventStream, defaultEventsBufferSize), // outbound set by WatchForObjectEvents @@ -56,17 +59,17 @@ func NewGenericUnstructuredEventStorage( // Note: This WatchStorage only works for one-frame files (i.e. only one YAML document // per file is supported). 
type GenericUnstructuredEventStorage struct { - raw.FilesystemStorage + filesystem.Storage // the recognizer recognizes files recognizer core.ObjectRecognizer // mapped file finder - fileFinder raw.MappedFileFinder + fileFinder unstructured.MappedFileFinder // the filesystem events emitter emitter FileEventsEmitter // channels inbound FileEventStream - outbound ObjectEventStream + outbound storage.ObjectEventStream outboundMu *gosync.Mutex // goroutine @@ -84,11 +87,11 @@ func (s *GenericUnstructuredEventStorage) FileEventsEmitter() FileEventsEmitter return s.emitter } -func (s *GenericUnstructuredEventStorage) MappedFileFinder() raw.MappedFileFinder { +func (s *GenericUnstructuredEventStorage) MappedFileFinder() unstructured.MappedFileFinder { return s.fileFinder } -func (s *GenericUnstructuredEventStorage) WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error { +func (s *GenericUnstructuredEventStorage) WatchForObjectEvents(ctx context.Context, into storage.ObjectEventStream) error { s.outboundMu.Lock() defer s.outboundMu.Unlock() // We don't support more than one listener @@ -117,7 +120,7 @@ func (s *GenericUnstructuredEventStorage) WatchForObjectEvents(ctx context.Conte func (s *GenericUnstructuredEventStorage) Sync(ctx context.Context) error { // List all valid files in the fs - files, err := core.ListValidFilesInFilesystem( + files, err := filesystem.ListValidFilesInFilesystem( ctx, s.emitter.Filesystem(), s.emitter.ContentTyper(), @@ -150,7 +153,7 @@ func (s *GenericUnstructuredEventStorage) Sync(ctx context.Context) error { // Add a mapping between this object and path s.setMapping(ctx, id, file) // Send a special "sync" event for this ObjectID to the events channel - s.sendEvent(ObjectEventSync, id) + s.sendEvent(storage.ObjectEventSync, id) } return nil @@ -166,8 +169,8 @@ func (s *GenericUnstructuredEventStorage) Write(ctx context.Context, id core.Unv } // Suspend the write event s.emitter.Suspend(ctx, p) - // Call the underlying 
raw.Storage - return s.FilesystemStorage.Write(ctx, id, content) + // Call the underlying filesystem.Storage + return s.Storage.Write(ctx, id, content) } // Delete deletes the resource indicated by the ID. @@ -180,13 +183,13 @@ func (s *GenericUnstructuredEventStorage) Delete(ctx context.Context, id core.Un } // Suspend the write event s.emitter.Suspend(ctx, p) - // Call the underlying raw.Storage - return s.FilesystemStorage.Delete(ctx, id) + // Call the underlying filesystem.Storage + return s.Storage.Delete(ctx, id) } func (s *GenericUnstructuredEventStorage) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { // Verify namespacing info - if err := raw.VerifyNamespaced(s.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace); err != nil { + if err := storage.VerifyNamespaced(s.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace); err != nil { return "", err } // Get the path @@ -245,7 +248,7 @@ func (s *GenericUnstructuredEventStorage) handleDelete(ctx context.Context, even // Remove the mapping from the FileFinder cache for this ID as it's now deleted s.deleteMapping(ctx, objectID) // Send the delete event to the channel - s.sendEvent(ObjectEventDelete, objectID) + s.sendEvent(storage.ObjectEventDelete, objectID) return nil } @@ -275,7 +278,7 @@ func (s *GenericUnstructuredEventStorage) handleModifyMove(ctx context.Context, // TODO: In the future, maybe support multiple files pointing to the same // ObjectID? Case in point here is e.g. a Modify event for a known path that // changes the underlying ObjectID. - objectEvent := ObjectEventUpdate + objectEvent := storage.ObjectEventUpdate // Set the mapping if it didn't exist before; assume this is a Create event if _, ok := s.fileFinder.GetMapping(ctx, versionedID); !ok { // Add a mapping between this object and path. 
@@ -283,16 +286,16 @@ func (s *GenericUnstructuredEventStorage) handleModifyMove(ctx context.Context, // This is what actually determines if an Object is created, // so update the event to update.ObjectEventCreate here - objectEvent = ObjectEventCreate + objectEvent = storage.ObjectEventCreate } // Send the event to the channel s.sendEvent(objectEvent, versionedID) return nil } -func (s *GenericUnstructuredEventStorage) sendEvent(event ObjectEventType, id core.UnversionedObjectID) { +func (s *GenericUnstructuredEventStorage) sendEvent(event storage.ObjectEventType, id core.UnversionedObjectID) { logrus.Tracef("GenericUnstructuredEventStorage: Sending event: %v", event) - s.outbound <- &ObjectEvent{ + s.outbound <- &storage.ObjectEvent{ ID: id, Type: event, } @@ -312,7 +315,7 @@ func (s *GenericUnstructuredEventStorage) setMapping(ctx context.Context, id cor // the checksum accordingly, by using Stat like above, but taking into account that there might // not be a previous mapping, in which case one needs to create that first. - s.fileFinder.SetMapping(ctx, id, raw.ChecksumPath{ + s.fileFinder.SetMapping(ctx, id, unstructured.ChecksumPath{ Path: path, //Checksum: oi.Checksum(), }) From 18e9745d01b0053dd6b547c46345b82f361aff00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sun, 24 Jan 2021 01:10:35 +0200 Subject: [PATCH 052/149] Change the top-level Storage; remove the Stat() method in favor for two distinct/specific methods: ContentType and Checksum, for different purposes. Remove unused ChecksumContainer and ObjectInfo. Move the events-related things to a dedicated package. 
--- pkg/storage/{ => event}/event.go | 2 +- pkg/storage/event/interfaces.go | 31 +++++++++++++ pkg/storage/interfaces.go | 80 ++++++-------------------------- 3 files changed, 45 insertions(+), 68 deletions(-) rename pkg/storage/{ => event}/event.go (98%) create mode 100644 pkg/storage/event/interfaces.go diff --git a/pkg/storage/event.go b/pkg/storage/event/event.go similarity index 98% rename from pkg/storage/event.go rename to pkg/storage/event/event.go index 92881ee2..3f57fdb2 100644 --- a/pkg/storage/event.go +++ b/pkg/storage/event/event.go @@ -1,4 +1,4 @@ -package storage +package event import ( "fmt" diff --git a/pkg/storage/event/interfaces.go b/pkg/storage/event/interfaces.go new file mode 100644 index 00000000..b13c1860 --- /dev/null +++ b/pkg/storage/event/interfaces.go @@ -0,0 +1,31 @@ +package event + +import ( + "context" + "io" + + "github.com/weaveworks/libgitops/pkg/storage" +) + +// StorageCommon contains the methods that EventStorage adds to the +// to the normal Storage. +type StorageCommon interface { + // WatchForObjectEvents starts feeding ObjectEvents into the given "into" + // channel. The caller is responsible for setting a channel buffering + // limit large enough to not block normal operation. An error might + // be returned if a maximum amount of watches has been opened already, + // e.g. ErrTooManyWatches. + WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error + + // Close closes the EventStorage and underlying resources gracefully. + io.Closer +} + +// EventStorage is the abstract combination of a normal Storage, and +// a possiblility to listen for changes to objects as they change. +// TODO: Maybe we could use some of controller-runtime's built-in functionality +// for watching for changes? 
+type EventStorage interface { + storage.Storage + StorageCommon +} diff --git a/pkg/storage/interfaces.go b/pkg/storage/interfaces.go index 73520c65..c5698e01 100644 --- a/pkg/storage/interfaces.go +++ b/pkg/storage/interfaces.go @@ -3,7 +3,6 @@ package storage import ( "context" "errors" - "io" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" @@ -48,23 +47,26 @@ type Reader interface { // Read returns a resource's content based on the ID. // If the resource does not exist, it returns core.NewErrNotFound. Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) - // Stat returns information about the object, e.g. checksum, - // content type, and possibly, path on disk (in the case of - // filesystem.Storage), or core.NewErrNotFound if not found - Stat(ctx context.Context, id core.UnversionedObjectID) (ObjectInfo, error) - // Resolve ContentType - ContentTypeResolver - // List operations - Lister -} + // Checksum returns a checksum of the Object with the given ID. + // + // What the checksum is is application-dependent, however, it + // should be the same for two invocations, as long as the stored + // data is the same. It might change over time although the + // underlying data did not. Examples of checksums that can be + // used is: the file modification timestamp, a sha256sum of the + // file content, or the latest Git commit when the file was + // changed. + Checksum(ctx context.Context, id core.UnversionedObjectID) (string, error) -type ContentTypeResolver interface { // ContentType returns the content type that should be used when serializing // the object with the given ID. This operation must function also before the // Object with the given id exists in the system, in order to be able to // create new Objects. 
ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) + + // List operations + Lister } type Lister interface { @@ -88,39 +90,6 @@ type Lister interface { ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) } -// ObjectInfo is the return value from Storage.Stat(). It provides the -// user with information about the given Object, e.g. its ContentType, -// a checksum, and its relative path on disk, if the Storage is a -// filesystem.Storage. -type ObjectInfo interface { - // ContentTyped returns the ContentType of the Object when stored. - serializer.ContentTyped - // ChecksumContainer knows how to retrieve the checksum of the file. - ChecksumContainer - // Path is the relative path between the AferoContext root dir and - // the Stat'd file. - Path() string - // ID returns the ID for the given Object. - ID() core.UnversionedObjectID -} - -// ChecksumContainer is an interface for exposing a checksum. -// -// What the checksum is is application-dependent, however, it -// should be the same for two invocations, as long as the stored -// data is the same. It might change over time although the -// underlying data did not. Examples of checksums that can be -// used is: the file modification timestamp, a sha256sum of the -// file content, or the latest Git commit when the file was -// changed. -// -// Look for documentation on the Storage you are using for more -// details on what checksum algorithm is used. -type ChecksumContainer interface { - // Checksum returns the checksum of the file. - Checksum() string -} - // Reader provides the write operations for the Storage. type Writer interface { StorageCommon @@ -132,26 +101,3 @@ type Writer interface { // If the resource does not exist, it returns ErrNotFound. Delete(ctx context.Context, id core.UnversionedObjectID) error } - -// EventStorageCommon contains the methods that EventStorage adds to the -// to the normal Storage. 
-type EventStorageCommon interface { - // WatchForObjectEvents starts feeding ObjectEvents into the given "into" - // channel. The caller is responsible for setting a channel buffering - // limit large enough to not block normal operation. An error might - // be returned if a maximum amount of watches has been opened already, - // e.g. ErrTooManyWatches. - WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error - - // Close closes the EventStorage and underlying resources gracefully. - io.Closer -} - -// EventStorage is the abstract combination of a normal Storage, and -// a possiblility to listen for changes to objects as they change. -// TODO: Maybe we could use some of controller-runtime's built-in functionality -// for watching for changes? -type EventStorage interface { - Storage - EventStorageCommon -} From b28900c272331d0157df48306fe29609e90d675c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sun, 24 Jan 2021 01:12:38 +0200 Subject: [PATCH 053/149] Always expose a ContentTyper for the FileFinder: create a generic "static" one for use with the SimpleFileFinder. --- pkg/storage/filesystem/filefinder_simple.go | 52 ++++++++++----------- pkg/storage/filesystem/format.go | 13 ++++++ pkg/storage/filesystem/interfaces.go | 7 ++- 3 files changed, 43 insertions(+), 29 deletions(-) diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go index 539f2578..e0e6940e 100644 --- a/pkg/storage/filesystem/filefinder_simple.go +++ b/pkg/storage/filesystem/filefinder_simple.go @@ -16,14 +16,14 @@ import ( // NewSimpleStorage is a default opinionated constructor for a Storage // using SimpleFileFinder as the FileFinder, and the local disk as target. // If you need more advanced customizablility than provided here, you can compose -// the call to NewGenericStorage yourself. +// the call to filesystem.NewGeneric yourself. 
func NewSimpleStorage(dir string, namespacer core.Namespacer, opts SimpleFileFinderOptions) (Storage, error) { fs := NewOSFilesystem(dir) fileFinder, err := NewSimpleFileFinder(fs, opts) if err != nil { return nil, err } - // fileFinder and namespacer are validated by NewGenericStorage. + // fileFinder and namespacer are validated by filesystem.NewGeneric. return NewGeneric(fileFinder, namespacer) } @@ -31,7 +31,20 @@ func NewSimpleFileFinder(fs Filesystem, opts SimpleFileFinderOptions) (*SimpleFi if fs == nil { return nil, fmt.Errorf("NewSimpleFileFinder: fs is mandatory") } - return &SimpleFileFinder{fs: fs, opts: opts}, nil + ct := serializer.ContentTypeJSON + if len(opts.ContentType) != 0 { + ct = opts.ContentType + } + resolver := DefaultFileExtensionResolver + if opts.FileExtensionResolver != nil { + resolver = opts.FileExtensionResolver + } + return &SimpleFileFinder{ + fs: fs, + opts: opts, + contentTyper: StaticContentTyper{ContentType: ct}, + resolver: resolver, + }, nil } // isObjectIDNamespaced returns true if the ID is of a namespaced GroupKind, and @@ -68,8 +81,10 @@ var _ FileFinder = &SimpleFileFinder{} // // This FileFinder does not support the ObjectAt method. 
type SimpleFileFinder struct { - fs Filesystem - opts SimpleFileFinderOptions + fs Filesystem + opts SimpleFileFinderOptions + contentTyper StaticContentTyper + resolver FileExtensionResolver } type SimpleFileFinderOptions struct { @@ -89,6 +104,10 @@ func (f *SimpleFileFinder) Filesystem() Filesystem { return f.fs } +func (f *SimpleFileFinder) ContentTyper() ContentTyper { + return f.contentTyper +} + // ObjectPath gets the file path relative to the root directory func (f *SimpleFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { // // @@ -128,29 +147,8 @@ func (f *SimpleFileFinder) ObjectAt(ctx context.Context, path string) (core.Unve return nil, errors.New("not implemented") } -// ContentType always returns f.ContentType, or ContentTypeJSON as a fallback if -// f.ContentType was not set. -func (f *SimpleFileFinder) ContentType(ctx context.Context, _ core.UnversionedObjectID) (serializer.ContentType, error) { - return f.contentType(), nil -} - func (f *SimpleFileFinder) ext() (string, error) { - resolver := f.opts.FileExtensionResolver - if resolver == nil { - resolver = DefaultFileExtensionResolver - } - ext, err := resolver.ExtensionForContentType(f.contentType()) - if err != nil { - return "", err - } - return ext, nil -} - -func (f *SimpleFileFinder) contentType() serializer.ContentType { - if len(f.opts.ContentType) != 0 { - return f.opts.ContentType - } - return serializer.ContentTypeJSON + return f.resolver.ExtensionForContentType(f.contentTyper.ContentType) } // ListNamespaces lists the available namespaces for the given GroupKind. 
diff --git a/pkg/storage/filesystem/format.go b/pkg/storage/filesystem/format.go index c4a33ce2..b36aa1cd 100644 --- a/pkg/storage/filesystem/format.go +++ b/pkg/storage/filesystem/format.go @@ -49,6 +49,19 @@ func (m ContentTypeForExtension) ContentTypeForPath(ctx context.Context, _ Files return ct, nil } +// StaticContentTyper always responds with the same, statically-set, ContentType for any path. +type StaticContentTyper struct { + // ContentType is a required field + ContentType serializer.ContentType +} + +func (t StaticContentTyper) ContentTypeForPath(_ context.Context, _ Filesystem, _ string) (serializer.ContentType, error) { + if len(t.ContentType) == 0 { + return "", fmt.Errorf("StaticContentTyper.ContentType must not be empty") + } + return t.ContentType, nil +} + // FileExtensionResolver knows how to resolve what file extension to use for // a given ContentType. type FileExtensionResolver interface { diff --git a/pkg/storage/filesystem/interfaces.go b/pkg/storage/filesystem/interfaces.go index 0c691034..2626680b 100644 --- a/pkg/storage/filesystem/interfaces.go +++ b/pkg/storage/filesystem/interfaces.go @@ -33,14 +33,17 @@ type FileFinder interface { // applicable. Filesystem() Filesystem + // ContentTyper gets the underlying ContentTyper used. The ContentTyper + // must always return a result although the underlying given path doesn't + // exist. + ContentTyper() ContentTyper + // ObjectPath gets the file path relative to the root directory. // In order to support a create operation, this function must also return a valid path for // files that do not yet exist on disk. ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) // ObjectAt retrieves the ID based on the given relative file path to fs. 
ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) - // The FileFinder should be able to resolve the content type for various IDs - storage.ContentTypeResolver // The FileFinder should be able to list namespaces and Object IDs storage.Lister } From 142e5f20734e418dd46e4cdba06c512364222df8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sun, 24 Jan 2021 01:13:56 +0200 Subject: [PATCH 054/149] Change filesystem.Generic to use the Checksum/ContentType methods. --- pkg/storage/filesystem/filesystem.go | 17 ++++++++--- pkg/storage/filesystem/storage.go | 42 ++++++++++------------------ 2 files changed, 27 insertions(+), 32 deletions(-) diff --git a/pkg/storage/filesystem/filesystem.go b/pkg/storage/filesystem/filesystem.go index 2a230671..f523e7b4 100644 --- a/pkg/storage/filesystem/filesystem.go +++ b/pkg/storage/filesystem/filesystem.go @@ -38,6 +38,18 @@ type Filesystem interface { // Custom methods + // Checksum returns a checksum of the given file. + // + // What the checksum is is application-dependent, however, it + // should be the same for two invocations, as long as the stored + // data is the same. It might change over time although the + // underlying data did not. Examples of checksums that can be + // used is: the file modification timestamp, a sha256sum of the + // file content, or the latest Git commit when the file was + // changed. + // + // os.IsNotExist(err) can be used to check if the file doesn't + // exist. Checksum(ctx context.Context, filename string) (string, error) // RootDirectory specifies where on disk the root directory is stored. @@ -56,9 +68,7 @@ func NewOSFilesystem(rootDir string) Filesystem { // in a Filesystem-compliant implementation; scoped at the given directory // (i.e. wrapped in afero.NewBasePathFs(fs, rootDir)). 
// -// Checksum is calculated based on the modification timestamp of the file, or -// alternatively, from info.Sys() returned from Filesystem.Stat(), if it can -// be cast to a ChecksumContainer. +// Checksum is calculated based on the modification timestamp of the file. func NewFilesystem(fs afero.Fs, rootDir string) Filesystem { // TODO: rootDir validation? It must be absolute, exist, and be a directory. return &filesystem{afero.NewBasePathFs(fs, rootDir), rootDir} @@ -113,7 +123,6 @@ func (f *filesystem) Walk(_ context.Context, root string, walkFn filepath.WalkFu return afero.Walk(f.fs, root, walkFn) } -// TODO: Move to the Filesystem abstraction func checksumFromFileInfo(fi os.FileInfo) string { return strconv.FormatInt(fi.ModTime().UnixNano(), 10) } diff --git a/pkg/storage/filesystem/storage.go b/pkg/storage/filesystem/storage.go index 0966310e..f3bc2870 100644 --- a/pkg/storage/filesystem/storage.go +++ b/pkg/storage/filesystem/storage.go @@ -3,6 +3,7 @@ package filesystem import ( "context" "fmt" + "os" "path/filepath" "github.com/weaveworks/libgitops/pkg/serializer" @@ -71,40 +72,29 @@ func (r *Generic) exists(ctx context.Context, path string) bool { return exists } -func (r *Generic) Stat(ctx context.Context, id core.UnversionedObjectID) (storage.ObjectInfo, error) { +func (r *Generic) Checksum(ctx context.Context, id core.UnversionedObjectID) (string, error) { // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { - return nil, err - } - - // Make sure the file exists - if !r.exists(ctx, p) { - return nil, core.NewErrNotFound(id) + return "", err } - - // Get the checksum + // Return a "high level" error if the file does not exist checksum, err := r.FileFinder().Filesystem().Checksum(ctx, p) - if err != nil { - return nil, err - } - - // Get content type - contentType, err := r.ContentType(ctx, id) - if err != nil { - return nil, err + if os.IsNotExist(err) { + return "", core.NewErrNotFound(id) + } else if err != nil { 
+ return "", err } - - return storage.NewObjectInfo(contentType, checksum, p, id), nil + return checksum, nil } func (r *Generic) ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) { - // Verify namespacing info - if err := r.verifyID(id); err != nil { + // Get the path and verify namespacing info + p, err := r.getPath(ctx, id) + if err != nil { return "", err } - - return r.FileFinder().ContentType(ctx, id) + return r.FileFinder().ContentTyper().ContentTypeForPath(ctx, r.fileFinder.Filesystem(), p) } func (r *Generic) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error { @@ -172,13 +162,9 @@ func (r *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespac func (r *Generic) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { // Verify namespacing info - if err := r.verifyID(id); err != nil { + if err := storage.VerifyNamespaced(r.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace); err != nil { return "", err } // Get the path return r.FileFinder().ObjectPath(ctx, id) } - -func (r *Generic) verifyID(id core.UnversionedObjectID) error { - return storage.VerifyNamespaced(r.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace) -} From 4204cbbf310fe6a781ad963655bd88cd6a95ecad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sun, 24 Jan 2021 01:15:52 +0200 Subject: [PATCH 055/149] Move pkg/storage/filesystem/{watch,fileevents}. Make the Emitter interface a bit simpler, yay! 
--- .../{watch => fileevents}/events.go | 2 +- .../filesystem/fileevents/interfaces.go | 57 +++++++++++++++ pkg/storage/filesystem/watch/interfaces.go | 71 ------------------- 3 files changed, 58 insertions(+), 72 deletions(-) rename pkg/storage/filesystem/{watch => fileevents}/events.go (97%) create mode 100644 pkg/storage/filesystem/fileevents/interfaces.go delete mode 100644 pkg/storage/filesystem/watch/interfaces.go diff --git a/pkg/storage/filesystem/watch/events.go b/pkg/storage/filesystem/fileevents/events.go similarity index 97% rename from pkg/storage/filesystem/watch/events.go rename to pkg/storage/filesystem/fileevents/events.go index 4db6d63a..38c385aa 100644 --- a/pkg/storage/filesystem/watch/events.go +++ b/pkg/storage/filesystem/fileevents/events.go @@ -1,4 +1,4 @@ -package watch +package fileevents // FileEventType is an enum describing a change in a file's state type FileEventType byte diff --git a/pkg/storage/filesystem/fileevents/interfaces.go b/pkg/storage/filesystem/fileevents/interfaces.go new file mode 100644 index 00000000..77d7708e --- /dev/null +++ b/pkg/storage/filesystem/fileevents/interfaces.go @@ -0,0 +1,57 @@ +package fileevents + +import ( + "context" + "errors" + "io" + + "github.com/weaveworks/libgitops/pkg/storage/event" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" +) + +var ( + // ErrTooManyWatches can happen when trying to register too many + // watching reciever channels to an event emitter. + ErrTooManyWatches = errors.New("too many watches already opened") +) + +// Emitter is an interface that provides high-level inotify-like +// behaviour to consumers. It can be used e.g. by even higher-level +// interfaces like FilesystemEventStorage. +type Emitter interface { + // WatchForFileEvents starts feeding FileEvents into the given "into" + // channel. The caller is responsible for setting a channel buffering + // limit large enough to not block normal operation. 
An error might + // be returned if a maximum amount of watches has been opened already, + // e.g. ErrTooManyWatches. + // + // Note that it is the receiver's responsibility to "validate" the + // file so it matches any user defined policy (e.g. only specific + // content types, or a PathExcluder has been given). + WatchForFileEvents(ctx context.Context, into FileEventStream) error + + // Suspend blocks the next event dispatch for this given path. Useful + // for not sending "your own" modification events into the + // FileEventStream that is listening. path is relative. + Suspend(ctx context.Context, path string) + + // Close closes the emitter gracefully. + io.Closer +} + +// StorageCommon is an extension to event.StorageCommon that +// also contains an underlying Emitter. This is meant to be +// used in tandem with filesystem.Storages. +type StorageCommon interface { + event.StorageCommon + + // FileEventsEmitter gets the Emitter used internally. + FileEventsEmitter() Emitter +} + +// FilesystemEventStorage is the combination of a filesystem.Storage, +// and the possibility to listen for object updates from a Emitter. +type FilesystemEventStorage interface { + filesystem.Storage + StorageCommon +} diff --git a/pkg/storage/filesystem/watch/interfaces.go b/pkg/storage/filesystem/watch/interfaces.go deleted file mode 100644 index d51d8a1b..00000000 --- a/pkg/storage/filesystem/watch/interfaces.go +++ /dev/null @@ -1,71 +0,0 @@ -package watch - -import ( - "context" - "errors" - "io" - - "github.com/weaveworks/libgitops/pkg/storage" - "github.com/weaveworks/libgitops/pkg/storage/filesystem" - "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured" -) - -var ( - // ErrTooManyWatches can happen when trying to register too many - // watching reciever channels to an event emitter. - ErrTooManyWatches = errors.New("too many watches already opened") -) - -// FileEventsEmitter is an interface that provides high-level inotify-like -// behaviour to consumers. 
It can be used e.g. by even higher-level -// interfaces like FilesystemEventStorage. -type FileEventsEmitter interface { - // WatchForFileEvents starts feeding FileEvents into the given "into" - // channel. The caller is responsible for setting a channel buffering - // limit large enough to not block normal operation. An error might - // be returned if a maximum amount of watches has been opened already, - // e.g. ErrTooManyWatches. - WatchForFileEvents(ctx context.Context, into FileEventStream) error - - // Suspend blocks the next event dispatch for this given path. Useful - // for not sending "your own" modification events into the - // FileEventStream that is listening. path is relative. - Suspend(ctx context.Context, path string) - - // PathExcluder returns the PathExcluder used internally - PathExcluder() filesystem.PathExcluder - // ContentTyper returns the ContentTyper used internally - ContentTyper() filesystem.ContentTyper - // Filesystem returns the filesystem abstraction used internally - Filesystem() filesystem.Filesystem - - // Close closes the emitter gracefully. - io.Closer -} - -// FileEventStorageCommon is an extension to EventStorageCommon that -// also contains an underlying FileEventsEmitter. This is meant to be -// used in tandem with filesystem.Storages. -type FileEventStorageCommon interface { - storage.EventStorageCommon - - // FileEventsEmitter gets the FileEventsEmitter used internally. - FileEventsEmitter() FileEventsEmitter -} - -// FilesystemEventStorage is the combination of a filesystem.Storage, -// and the possibility to listen for object updates from a FileEventsEmitter. -type FilesystemEventStorage interface { - filesystem.Storage - FileEventStorageCommon -} - -// UnstructuredEventStorage is an extension of raw.UnstructuredStorage, that -// adds the possiblility to listen for object updates from a FileEventsEmitter. 
-// -// When the Sync() function is run; the ObjectEvents that are emitted to the -// listening channels with have ObjectEvent.Type == ObjectEventSync. -type UnstructuredEventStorage interface { - unstructured.Storage - FileEventStorageCommon -} From 054c09a5a929cb2270439a6b278326ea55420a1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sun, 24 Jan 2021 01:16:58 +0200 Subject: [PATCH 056/149] Make the MappedFileFinder expose ContentTyper. --- .../filesystem/unstructured/filefinder_mapped.go | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go index 6684658a..474919fa 100644 --- a/pkg/storage/filesystem/unstructured/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -5,7 +5,6 @@ import ( "errors" "github.com/fluxcd/go-git-providers/validation" - "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" "k8s.io/apimachinery/pkg/util/sets" @@ -58,6 +57,10 @@ func (f *GenericMappedFileFinder) Filesystem() filesystem.Filesystem { return f.fs } +func (f *GenericMappedFileFinder) ContentTyper() filesystem.ContentTyper { + return f.contentTyper +} + // ObjectPath gets the file path relative to the root directory func (f *GenericMappedFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { cp, ok := f.GetMapping(ctx, id) @@ -119,16 +122,6 @@ func (f *GenericMappedFileFinder) ListObjectIDs(ctx context.Context, gk core.Gro return ids, nil } -func (f *GenericMappedFileFinder) ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) { - // First, get the path - p, err := f.ObjectPath(ctx, id) - if err != nil { - return "", err - } - // Then, ask the ContentTyper - return 
f.contentTyper.ContentTypeForPath(ctx, f.fs, p) -} - // GetMapping retrieves a mapping in the system func (f *GenericMappedFileFinder) GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) { cp, ok := f.branch. From 72aefa8e8c7a54837fcad01b2faa33c75f2b18de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sun, 24 Jan 2021 01:20:18 +0200 Subject: [PATCH 057/149] Make the inotify emitter simpler by removing the duty of it to validate the files. Change import paths. --- .../inotify/filewatcher.go | 80 ++++++------------- .../inotify/filewatcher_test.go | 16 ++-- .../{watch => fileevents}/inotify/options.go | 20 +---- 3 files changed, 35 insertions(+), 81 deletions(-) rename pkg/storage/filesystem/{watch => fileevents}/inotify/filewatcher.go (83%) rename pkg/storage/filesystem/{watch => fileevents}/inotify/filewatcher_test.go (86%) rename pkg/storage/filesystem/{watch => fileevents}/inotify/options.go (63%) diff --git a/pkg/storage/filesystem/watch/inotify/filewatcher.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go similarity index 83% rename from pkg/storage/filesystem/watch/inotify/filewatcher.go rename to pkg/storage/filesystem/fileevents/inotify/filewatcher.go index f79a9d9b..3a7171da 100644 --- a/pkg/storage/filesystem/watch/inotify/filewatcher.go +++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go @@ -10,8 +10,7 @@ import ( "github.com/rjeczalik/notify" "github.com/sirupsen/logrus" log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/storage/filesystem" - "github.com/weaveworks/libgitops/pkg/storage/filesystem/watch" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents" "github.com/weaveworks/libgitops/pkg/util/sync" "golang.org/x/sys/unix" "k8s.io/apimachinery/pkg/util/sets" @@ -19,9 +18,9 @@ import ( var listenEvents = []notify.Event{notify.InDelete, notify.InCloseWrite, notify.InMovedFrom, notify.InMovedTo} -var eventMap = 
map[notify.Event]watch.FileEventType{ - notify.InDelete: watch.FileEventDelete, - notify.InCloseWrite: watch.FileEventModify, +var eventMap = map[notify.Event]fileevents.FileEventType{ + notify.InDelete: fileevents.FileEventDelete, + notify.InCloseWrite: fileevents.FileEventModify, } // combinedEvents describes the event combinations to concatenate, @@ -37,12 +36,12 @@ type notifyEvents []notify.EventInfo type eventStream chan notify.EventInfo // FileEvents is a slice of FileEvent pointers -type FileEvents []*watch.FileEvent +type FileEvents []*fileevents.FileEvent // NewFileWatcher returns a list of files in the watched directory in // addition to the generated FileWatcher, it can be used to populate // MappedRawStorage fileMappings -func NewFileWatcher(dir string, opts ...FileWatcherOption) (watch.FileEventsEmitter, error) { +func NewFileWatcher(dir string, opts ...FileWatcherOption) (fileevents.Emitter, error) { o := defaultOptions().ApplyOptions(opts) w := &FileWatcher{ @@ -58,9 +57,6 @@ func NewFileWatcher(dir string, opts ...FileWatcherOption) (watch.FileEventsEmit // monitor and dispatcher set by WatchForFileEvents, guarded by outboundMu opts: *o, - // afero operates on the local disk, but is by convention scoped to the local - // directory that is being watched - fs: filesystem.NewOSFilesystem(dir), batcher: sync.NewBatchWriter(o.BatchTimeout), } @@ -74,7 +70,7 @@ func NewFileWatcher(dir string, opts ...FileWatcherOption) (watch.FileEventsEmit return w, nil } -var _ watch.FileEventsEmitter = &FileWatcher{} +var _ fileevents.Emitter = &FileWatcher{} // FileWatcher recursively monitors changes in files in the given directory // and sends out events based on their state changes. 
Only files conforming @@ -84,7 +80,7 @@ type FileWatcher struct { dir string // channels inbound eventStream - outbound watch.FileEventStream + outbound fileevents.FileEventStream outboundMu *gosync.Mutex // new suspend logic suspendFiles sets.String @@ -92,36 +88,22 @@ type FileWatcher struct { // goroutines monitor *sync.Monitor dispatcher *sync.Monitor - opts FileWatcherOptions - // afero is always the OsFs type, which means it is passing the calls through - // directly to the local disk. It is used when talking to the given ContentTyper - // in order to identify various content types. - fs filesystem.Filesystem + + // opts + opts FileWatcherOptions // the batcher is used for properly sending many concurrent inotify events // as a group, after a specified timeout. This fixes the issue of one single // file operation being registered as many different inotify events batcher *sync.BatchWriter } -func (w *FileWatcher) ContentTyper() filesystem.ContentTyper { - return w.opts.ContentTyper -} - -func (w *FileWatcher) PathExcluder() filesystem.PathExcluder { - return w.opts.PathExcluder -} - -func (w *FileWatcher) Filesystem() filesystem.Filesystem { - return w.fs -} - -func (w *FileWatcher) WatchForFileEvents(ctx context.Context, into watch.FileEventStream) error { +func (w *FileWatcher) WatchForFileEvents(ctx context.Context, into fileevents.FileEventStream) error { w.outboundMu.Lock() defer w.outboundMu.Unlock() // We don't support more than one listener // TODO: maybe support many listeners in the future? 
if w.outbound != nil { - return fmt.Errorf("FileWatcher: not more than one watch supported: %w", watch.ErrTooManyWatches) + return fmt.Errorf("FileWatcher: not more than one watch supported: %w", fileevents.ErrTooManyWatches) } w.outbound = into // Start the backing goroutines @@ -130,11 +112,6 @@ func (w *FileWatcher) WatchForFileEvents(ctx context.Context, into watch.FileEve return nil // all ok } -func (w *FileWatcher) validFile(path string) bool { - ctx := context.Background() - return filesystem.IsValidFileInFilesystem(ctx, w.fs, w.opts.ContentTyper, w.opts.PathExcluder, path) -} - func (w *FileWatcher) monitorFunc() { log.Debug("FileWatcher: Monitoring thread started") defer log.Debug("FileWatcher: Monitoring thread stopped") @@ -187,7 +164,7 @@ func (w *FileWatcher) dispatchFunc() { } } -func (w *FileWatcher) sendUpdate(event *watch.FileEvent) { +func (w *FileWatcher) sendUpdate(event *fileevents.FileEvent) { // Get the relative path between the root directory and the changed file relativePath, err := filepath.Rel(w.dir, event.Path) if err != nil { @@ -197,10 +174,6 @@ func (w *FileWatcher) sendUpdate(event *watch.FileEvent) { // Replace the full path with the relative path for the signaling upstream event.Path = relativePath - if !w.validFile(event.Path) { - return // Skip invalid files - } - if w.shouldSuspendEvent(event.Path) { log.Debugf("FileWatcher: Skipping suspended event %s for path: %q", event.Type, event.Path) return // Skip the suspended event @@ -223,7 +196,6 @@ func (w *FileWatcher) Close() error { // Suspend enables a one-time suspend of the given path // TODO: clarify how the path should be formatted func (w *FileWatcher) Suspend(_ context.Context, path string) { - //w.suspendEvent = updateEvent w.suspendFilesMu.Lock() defer w.suspendFilesMu.Unlock() w.suspendFiles.Insert(path) @@ -244,22 +216,22 @@ func (w *FileWatcher) shouldSuspendEvent(path string) bool { return true } -func convertEvent(event notify.Event) watch.FileEventType { +func 
convertEvent(event notify.Event) fileevents.FileEventType { if updateEvent, ok := eventMap[event]; ok { return updateEvent } - return watch.FileEventNone + return fileevents.FileEventNone } -func convertUpdate(event notify.EventInfo) *watch.FileEvent { +func convertUpdate(event notify.EventInfo) *fileevents.FileEvent { fileEvent := convertEvent(event.Event()) - if fileEvent == watch.FileEventNone { + if fileEvent == fileevents.FileEventNone { // This should never happen panic(fmt.Sprintf("invalid event for update conversion: %q", event.Event().String())) } - return &watch.FileEvent{ + return &fileevents.FileEvent{ Path: event.Path(), Type: fileEvent, } @@ -293,20 +265,20 @@ func (m *moveCache) cookie() uint32 { // if only one is received, the file is moved in/out of a watched directory, which // is treated as a normal creation/deletion by this method. func (m *moveCache) incomplete() { - var evType watch.FileEventType + var evType fileevents.FileEventType switch m.event.Event() { case notify.InMovedFrom: - evType = watch.FileEventDelete + evType = fileevents.FileEventDelete case notify.InMovedTo: - evType = watch.FileEventModify + evType = fileevents.FileEventModify default: // This should never happen panic(fmt.Sprintf("moveCache: unrecognized event: %v", m.event.Event())) } log.Tracef("moveCache: Timer expired for %d, dispatching...", m.cookie()) - m.watcher.sendUpdate(&watch.FileEvent{Path: m.event.Path(), Type: evType}) + m.watcher.sendUpdate(&fileevents.FileEvent{Path: m.event.Path(), Type: evType}) // Delete the cache after the timer has fired moveCachesMu.Lock() @@ -330,7 +302,7 @@ var ( // move processes InMovedFrom and InMovedTo events in any order // and dispatches FileUpdates when a move is detected -func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *watch.FileEvent) { +func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *fileevents.FileEvent) { cookie := ievent(event).Cookie moveCachesMu.RLock() cache, ok := moveCaches[cookie] @@ 
-349,8 +321,8 @@ func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *watch.FileEvent) sourcePath, destPath = destPath, sourcePath fallthrough case notify.InMovedTo: - cache.cancel() // Cancel dispatching the cache's incomplete move - moveUpdate = &watch.FileEvent{Path: destPath, Type: watch.FileEventMove} // Register an internal, complete move instead + cache.cancel() // Cancel dispatching the cache's incomplete move + moveUpdate = &fileevents.FileEvent{Path: destPath, Type: fileevents.FileEventMove} // Register an internal, complete move instead log.Tracef("FileWatcher: Detected move: %q -> %q", sourcePath, destPath) } diff --git a/pkg/storage/filesystem/watch/inotify/filewatcher_test.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go similarity index 86% rename from pkg/storage/filesystem/watch/inotify/filewatcher_test.go rename to pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go index cc1fa7d6..c423f247 100644 --- a/pkg/storage/filesystem/watch/inotify/filewatcher_test.go +++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/rjeczalik/notify" - "github.com/weaveworks/libgitops/pkg/storage/filesystem/watch" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents" "golang.org/x/sys/unix" ) @@ -56,18 +56,18 @@ var testEvents = []notifyEvents{ var targets = []FileEventTypes{ { - watch.FileEventModify, + fileevents.FileEventModify, }, { - watch.FileEventDelete, + fileevents.FileEventDelete, }, { - watch.FileEventModify, - watch.FileEventMove, - watch.FileEventDelete, + fileevents.FileEventModify, + fileevents.FileEventMove, + fileevents.FileEventDelete, }, { - watch.FileEventModify, + fileevents.FileEventModify, }, {}, } @@ -95,7 +95,7 @@ func eventsEqual(a, b FileEventTypes) bool { } // FileEventTypes is a slice of FileEventType -type FileEventTypes []watch.FileEventType +type FileEventTypes []fileevents.FileEventType var _ fmt.Stringer = 
FileEventTypes{} diff --git a/pkg/storage/filesystem/watch/inotify/options.go b/pkg/storage/filesystem/fileevents/inotify/options.go similarity index 63% rename from pkg/storage/filesystem/watch/inotify/options.go rename to pkg/storage/filesystem/fileevents/inotify/options.go index bb816a5b..dbf52e95 100644 --- a/pkg/storage/filesystem/watch/inotify/options.go +++ b/pkg/storage/filesystem/fileevents/inotify/options.go @@ -2,8 +2,6 @@ package inotify import ( "time" - - "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) // How many inotify events we can buffer before watching is interrupted @@ -15,20 +13,12 @@ type FileWatcherOption interface { var _ FileWatcherOption = &FileWatcherOptions{} -// Options specifies options for the FileWatcher +// FileWatcherOptions specifies options for the FileWatcher type FileWatcherOptions struct { - // PathExcluder specifies what files and directories to ignore - // Default: filesystem.ExcludeGitDirectory{} - PathExcluder filesystem.PathExcluder // BatchTimeout specifies the duration to wait after last event // before dispatching grouped inotify events // Default: 1s BatchTimeout time.Duration - // ContentTyper specifies what content types to recognize. - // All files for which ContentTyper returns a nil error will - // be watched. - // Default: filesystem.DefaultContentTyper - ContentTyper filesystem.ContentTyper // EventBufferSize describes how many inotify events can be buffered // before watching is interrupted/delayed. 
// Default: DefaultEventBufferSize @@ -36,15 +26,9 @@ type FileWatcherOptions struct { } func (o *FileWatcherOptions) ApplyToFileWatcher(target *FileWatcherOptions) { - if o.PathExcluder != nil { - target.PathExcluder = o.PathExcluder - } if o.BatchTimeout != 0 { target.BatchTimeout = o.BatchTimeout } - if o.ContentTyper != nil { - target.ContentTyper = o.ContentTyper - } if o.EventBufferSize != 0 { target.EventBufferSize = o.EventBufferSize } @@ -60,9 +44,7 @@ func (o *FileWatcherOptions) ApplyOptions(opts []FileWatcherOption) *FileWatcher // defaultOptions returns the default options func defaultOptions() *FileWatcherOptions { return &FileWatcherOptions{ - PathExcluder: filesystem.ExcludeGitDirectory{}, BatchTimeout: 1 * time.Second, - ContentTyper: filesystem.DefaultContentTyper, EventBufferSize: DefaultEventBufferSize, } } From 5a6d85910fd1b7ab669fe5e0329fe65b8fc40343 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sun, 24 Jan 2021 01:21:45 +0200 Subject: [PATCH 058/149] Polish the unstructured.Storage interface a bit, and create a first implementation --- .../filesystem/unstructured/interfaces.go | 25 +++- .../filesystem/unstructured/storage.go | 120 ++++++++++++++++++ 2 files changed, 139 insertions(+), 6 deletions(-) create mode 100644 pkg/storage/filesystem/unstructured/storage.go diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go index 9f8227b7..814b4379 100644 --- a/pkg/storage/filesystem/unstructured/interfaces.go +++ b/pkg/storage/filesystem/unstructured/interfaces.go @@ -19,13 +19,12 @@ type Storage interface { // Sync synchronizes the current state of the filesystem with the // cached mappings in the MappedFileFinder. - Sync(ctx context.Context) error + Sync(ctx context.Context) ([]ChecksumPathID, error) // ObjectRecognizer returns the underlying ObjectRecognizer used. 
ObjectRecognizer() core.ObjectRecognizer // PathExcluder specifies what paths to not sync - // TODO: enable this - // PathExcluder() core.PathExcluder + PathExcluder() filesystem.PathExcluder // MappedFileFinder returns the underlying MappedFileFinder used. MappedFileFinder() MappedFileFinder } @@ -54,9 +53,23 @@ type MappedFileFinder interface { // ChecksumPath is a tuple of a given Checksum and relative file Path, // for use in MappedFileFinder. type ChecksumPath struct { - // TODO: Implement ChecksumContainer, or make ChecksumPath a - // sub-interface of ObjectID? + // Checksum is the checksum of the file at the given path. + // + // What the checksum is is application-dependent, however, it + // should be the same for two invocations, as long as the stored + // data is the same. It might change over time although the + // underlying data did not. Examples of checksums that can be + // used are: the file modification timestamp, a sha256sum of the + // file content, or the latest Git commit when the file was + // changed. + // + // The checksum is calculated by the filesystem.Filesystem. Checksum string - // Note: path is relative to filesystem.Filesystem.RootDirectory(). + // Path to the file, relative to filesystem.Filesystem.RootDirectory(). 
Path string } + +type ChecksumPathID struct { + ChecksumPath + ID core.ObjectID +} diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go new file mode 100644 index 00000000..f753ae8f --- /dev/null +++ b/pkg/storage/filesystem/unstructured/storage.go @@ -0,0 +1,120 @@ +package unstructured + +import ( + "context" + "errors" + "fmt" + + "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" +) + +func NewGeneric(storage filesystem.Storage, recognizer core.ObjectRecognizer, pathExcluder filesystem.PathExcluder) (Storage, error) { + if storage == nil { + return nil, fmt.Errorf("storage is mandatory") + } + if recognizer == nil { + return nil, fmt.Errorf("recognizer is mandatory") + } + mappedFileFinder, ok := storage.FileFinder().(MappedFileFinder) + if !ok { + return nil, errors.New("the given filesystem.Storage must use a MappedFileFinder") + } + return &Generic{ + Storage: storage, + recognizer: recognizer, + mappedFileFinder: mappedFileFinder, + pathExcluder: pathExcluder, + }, nil +} + +type Generic struct { + filesystem.Storage + recognizer core.ObjectRecognizer + mappedFileFinder MappedFileFinder + pathExcluder filesystem.PathExcluder +} + +// Sync synchronizes the current state of the filesystem with the +// cached mappings in the MappedFileFinder. 
+func (s *Generic) Sync(ctx context.Context) ([]ChecksumPathID, error) { + fileFinder := s.MappedFileFinder() + + // List all valid files in the fs + files, err := filesystem.ListValidFilesInFilesystem( + ctx, + fileFinder.Filesystem(), + fileFinder.ContentTyper(), + s.pathExcluder, + ) + if err != nil { + return nil, err + } + + // Send SYNC events for all files (and fill the mappings + // of the MappedFileFinder) before starting to monitor changes + updatedFiles := make([]ChecksumPathID, 0, len(files)) + for _, filePath := range files { + // Get the current checksum of the file + currentChecksum, err := fileFinder.Filesystem().Checksum(ctx, filePath) + if err != nil { + logrus.Errorf("Could not get checksum for file %q: %v", filePath, err) + continue + } + + // If the given file already is tracked; i.e. has a mapping with a + // non-empty checksum, and the current checksum matches, we do not + // need to do anything. + if id, err := fileFinder.ObjectAt(ctx, filePath); err == nil { + if cp, ok := fileFinder.GetMapping(ctx, id); ok && len(cp.Checksum) != 0 { + if cp.Checksum == currentChecksum { + logrus.Tracef("Checksum for file %q is up-to-date: %q, skipping...", filePath, cp.Checksum) + continue + } + } + } + + // If the file is not known to the FileFinder yet, or if the checksum + // was empty, read the file, and recognize it. 
+ content, err := s.FileFinder().Filesystem().ReadFile(ctx, filePath) + if err != nil { + logrus.Warnf("Ignoring %q: %v", filePath, err) + continue + } + + id, err := s.recognizer.ResolveObjectID(ctx, filePath, content) + if err != nil { + logrus.Warnf("Could not recognize object ID in %q: %v", filePath, err) + continue + } + + // Add a mapping between this object and path + cp := ChecksumPath{ + Checksum: currentChecksum, + Path: filePath, + } + s.MappedFileFinder().SetMapping(ctx, id, cp) + // Add to the slice which we'll return + updatedFiles = append(updatedFiles, ChecksumPathID{ + ChecksumPath: cp, + ID: id, + }) + } + return updatedFiles, nil +} + +// ObjectRecognizer returns the underlying ObjectRecognizer used. +func (s *Generic) ObjectRecognizer() core.ObjectRecognizer { + return s.recognizer +} + +// PathExcluder specifies what paths to not sync +func (s *Generic) PathExcluder() filesystem.PathExcluder { + return s.pathExcluder +} + +// MappedFileFinder returns the underlying MappedFileFinder used. +func (s *Generic) MappedFileFinder() MappedFileFinder { + return s.mappedFileFinder +} From 6076ce0da27002d6e6b5c2ce3d4bf0d53badaa20 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sun, 24 Jan 2021 01:25:17 +0200 Subject: [PATCH 059/149] Move the "WatchStorage" from ./watch/ to ./unstructured/event. Make it build on top of the unstructured.Storage, and the fileevents.Emitter. 
--- .../event/storage.go} | 268 ++++++++++-------- .../filesystem/watch/manifest/manifest.go | 35 --- 2 files changed, 144 insertions(+), 159 deletions(-) rename pkg/storage/filesystem/{watch/watch.go => unstructured/event/storage.go} (50%) delete mode 100644 pkg/storage/filesystem/watch/manifest/manifest.go diff --git a/pkg/storage/filesystem/watch/watch.go b/pkg/storage/filesystem/unstructured/event/storage.go similarity index 50% rename from pkg/storage/filesystem/watch/watch.go rename to pkg/storage/filesystem/unstructured/event/storage.go index 3e06ccc2..59ea6cee 100644 --- a/pkg/storage/filesystem/watch/watch.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -1,22 +1,64 @@ -package watch +package unstructuredevent import ( "context" - "errors" "fmt" gosync "sync" "github.com/sirupsen/logrus" "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/event" "github.com/weaveworks/libgitops/pkg/storage/filesystem" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents/inotify" "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured" "github.com/weaveworks/libgitops/pkg/util/sync" ) +// UnstructuredEventStorage is an extension of raw.UnstructuredStorage, that +// adds the possibility to listen for object updates from a FileEventsEmitter. +// +// When the Sync() function is run; the ObjectEvents that are emitted to the +// listening channels will have ObjectEvent.Type == ObjectEventSync. 
+type UnstructuredEventStorage interface { + unstructured.Storage + fileevents.StorageCommon +} + const defaultEventsBufferSize = 4096 -// NewGenericUnstructuredEventStorage is an extended Storage implementation, which +// NewManifest is a high-level constructor for a generic +// MappedFileFinder and filesystem.Storage, together with a +// inotify FileWatcher; all combined into an UnstructuredEventStorage. +func NewManifest( + dir string, + contentTyper filesystem.ContentTyper, + namespacer core.Namespacer, + recognizer core.ObjectRecognizer, + pathExcluder filesystem.PathExcluder, +) (UnstructuredEventStorage, error) { + fs := filesystem.NewOSFilesystem(dir) + fileFinder := unstructured.NewGenericMappedFileFinder(contentTyper, fs) + fsRaw, err := filesystem.NewGeneric(fileFinder, namespacer) + if err != nil { + return nil, err + } + emitter, err := inotify.NewFileWatcher(dir) + if err != nil { + return nil, err + } + unstructuredRaw, err := unstructured.NewGeneric(fsRaw, recognizer, pathExcluder) + if err != nil { + return nil, err + } + return NewGeneric(unstructuredRaw, emitter, GenericStorageOptions{ + SyncAtStart: true, + EmitSyncEvent: true, + }) +} + +// NewGeneric is an extended Storage implementation, which // together with the provided ObjectRecognizer and FileEventsEmitter listens for // file events, keeps the mappings of the filesystem.Storage's MappedFileFinder // in sync (s must use the mapped variant), and sends high-level ObjectEvents @@ -24,80 +66,72 @@ const defaultEventsBufferSize = 4096 // // Note: This WatchStorage only works for one-frame files (i.e. only one YAML document // per file is supported). 
-func NewGenericUnstructuredEventStorage( - s filesystem.Storage, - recognizer core.ObjectRecognizer, - emitter FileEventsEmitter, - syncInBeginning bool, +func NewGeneric( + s unstructured.Storage, + emitter fileevents.Emitter, + opts GenericStorageOptions, ) (UnstructuredEventStorage, error) { - // TODO: Possibly relax this requirement later, maybe it can also work for the SimpleFileFinder? - fileFinder, ok := s.FileFinder().(unstructured.MappedFileFinder) - if !ok { - return nil, errors.New("the given filesystem.Storage must use a MappedFileFinder") - } - - return &GenericUnstructuredEventStorage{ - Storage: s, - recognizer: recognizer, - fileFinder: fileFinder, - emitter: emitter, + return &Generic{ + Storage: s, + emitter: emitter, - inbound: make(FileEventStream, defaultEventsBufferSize), + inbound: make(fileevents.FileEventStream, defaultEventsBufferSize), // outbound set by WatchForObjectEvents outboundMu: &gosync.Mutex{}, // monitor set by WatchForObjectEvents, guarded by outboundMu - syncInBeginning: syncInBeginning, + opts: opts, }, nil } -// GenericUnstructuredEventStorage is an extended raw.Storage implementation, which provides a watcher +type GenericStorageOptions struct { + // When Sync(ctx) is run, emit a "SYNC" event to the listening channel + // Default: false + EmitSyncEvent bool + // Do a full re-sync at startup of the watcher + // Default: true + SyncAtStart bool +} + +// Generic implements UnstructuredEventStorage. +var _ UnstructuredEventStorage = &Generic{} + +// Generic is an extended raw.Storage implementation, which provides a watcher // for watching changes in the directory managed by the embedded Storage's RawStorage. // If the RawStorage is a MappedRawStorage instance, it's mappings will automatically // be updated by the WatchStorage. Update events are sent to the given event stream. // Note: This WatchStorage only works for one-frame files (i.e. only one YAML document // per file is supported). 
-type GenericUnstructuredEventStorage struct { - filesystem.Storage - // the recognizer recognizes files - recognizer core.ObjectRecognizer - // mapped file finder - fileFinder unstructured.MappedFileFinder +// TODO: Update description +type Generic struct { + unstructured.Storage // the filesystem events emitter - emitter FileEventsEmitter + emitter fileevents.Emitter // channels - inbound FileEventStream - outbound storage.ObjectEventStream + inbound fileevents.FileEventStream + outbound event.ObjectEventStream outboundMu *gosync.Mutex // goroutine monitor *sync.Monitor // opts - syncInBeginning bool + opts GenericStorageOptions } -func (s *GenericUnstructuredEventStorage) ObjectRecognizer() core.ObjectRecognizer { - return s.recognizer -} - -func (s *GenericUnstructuredEventStorage) FileEventsEmitter() FileEventsEmitter { +func (s *Generic) FileEventsEmitter() fileevents.Emitter { return s.emitter } -func (s *GenericUnstructuredEventStorage) MappedFileFinder() unstructured.MappedFileFinder { - return s.fileFinder -} - -func (s *GenericUnstructuredEventStorage) WatchForObjectEvents(ctx context.Context, into storage.ObjectEventStream) error { +func (s *Generic) WatchForObjectEvents(ctx context.Context, into event.ObjectEventStream) error { s.outboundMu.Lock() defer s.outboundMu.Unlock() // We don't support more than one listener // TODO: maybe support many listeners in the future? if s.outbound != nil { - return fmt.Errorf("WatchStorage: not more than one watch supported: %w", ErrTooManyWatches) + return fmt.Errorf("WatchStorage: not more than one watch supported: %w", fileevents.ErrTooManyWatches) } // Hook up our inbound channel to the emitter, to make the pipeline functional if err := s.emitter.WatchForFileEvents(ctx, s.inbound); err != nil { @@ -110,58 +144,36 @@ func (s *GenericUnstructuredEventStorage) WatchForObjectEvents(ctx context.Conte // Do a full sync in the beginning only if asked. 
Be aware that without running a Sync // at all before events start happening, the reporting might not work as it should - if s.syncInBeginning { - if err := s.Sync(ctx); err != nil { + if s.opts.SyncAtStart { + // Disregard the changed files at Sync. + if _, err := s.Sync(ctx); err != nil { return err } } return nil // all ok } -func (s *GenericUnstructuredEventStorage) Sync(ctx context.Context) error { - // List all valid files in the fs - files, err := filesystem.ListValidFilesInFilesystem( - ctx, - s.emitter.Filesystem(), - s.emitter.ContentTyper(), - s.emitter.PathExcluder(), - ) +func (s *Generic) Sync(ctx context.Context) ([]unstructured.ChecksumPathID, error) { + // Sync the underlying UnstructuredStorage, and see what files had changed since last sync + changedObjects, err := s.Storage.Sync(ctx) if err != nil { - return err + return nil, err } - // Send SYNC events for all files (and fill the mappings - // of the MappedRawStorage) before starting to monitor changes - for _, file := range files { - // TODO: when checksum support is added to setMapping, we can skip - // reading such files which already have an up-to-date checksum. 
- // TODO: Alternatively/also, we should support feeding an - // UnstructuredStorage, so that we can run its Sync() function instead - - content, err := s.FileFinder().Filesystem().ReadFile(ctx, file) - if err != nil { - logrus.Warnf("Ignoring %q: %v", file, err) - continue - } - - id, err := s.recognizer.ResolveObjectID(ctx, file, content) - if err != nil { - logrus.Warnf("Could not recognize object ID in %q: %v", file, err) - continue + // Send special "sync" events for each of the changed objects, if configured + if s.opts.EmitSyncEvent { + for _, changedObject := range changedObjects { + // Send a special "sync" event for this ObjectID to the events channel + s.sendEvent(event.ObjectEventSync, changedObject.ID) } - - // Add a mapping between this object and path - s.setMapping(ctx, id, file) - // Send a special "sync" event for this ObjectID to the events channel - s.sendEvent(storage.ObjectEventSync, id) } - return nil + return changedObjects, nil } // Write writes the given content to the resource indicated by the ID. // Error returns are implementation-specific. -func (s *GenericUnstructuredEventStorage) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error { +func (s *Generic) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error { // Get the path and verify namespacing info p, err := s.getPath(ctx, id) if err != nil { @@ -175,7 +187,7 @@ func (s *GenericUnstructuredEventStorage) Write(ctx context.Context, id core.Unv // Delete deletes the resource indicated by the ID. // If the resource does not exist, it returns ErrNotFound. 
-func (s *GenericUnstructuredEventStorage) Delete(ctx context.Context, id core.UnversionedObjectID) error { +func (s *Generic) Delete(ctx context.Context, id core.UnversionedObjectID) error { // Get the path and verify namespacing info p, err := s.getPath(ctx, id) if err != nil { @@ -187,7 +199,7 @@ func (s *GenericUnstructuredEventStorage) Delete(ctx context.Context, id core.Un return s.Storage.Delete(ctx, id) } -func (s *GenericUnstructuredEventStorage) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { +func (s *Generic) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { // Verify namespacing info if err := storage.VerifyNamespaced(s.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace); err != nil { return "", err @@ -196,13 +208,13 @@ func (s *GenericUnstructuredEventStorage) getPath(ctx context.Context, id core.U return s.FileFinder().ObjectPath(ctx, id) } -func (s *GenericUnstructuredEventStorage) Close() error { +func (s *Generic) Close() error { err := s.emitter.Close() s.monitor.Wait() return err } -func (s *GenericUnstructuredEventStorage) monitorFunc() { +func (s *Generic) monitorFunc() { logrus.Debug("WatchStorage: Monitoring thread started") defer logrus.Debug("WatchStorage: Monitoring thread stopped") @@ -210,23 +222,34 @@ func (s *GenericUnstructuredEventStorage) monitorFunc() { for { // TODO: handle context cancellations, i.e. 
ctx.Done() - event, ok := <-s.inbound + ev, ok := <-s.inbound if !ok { logrus.Error("WatchStorage: Fatal: Got non-ok response from watcher.GetFileEventStream()") return } - logrus.Tracef("WatchStorage: Processing event: %s", event.Type) + logrus.Tracef("WatchStorage: Processing event: %s", ev.Type) + + // Skip the file if it has an invalid path + if !filesystem.IsValidFileInFilesystem( + ctx, + s.FileFinder().Filesystem(), + s.FileFinder().ContentTyper(), + s.PathExcluder(), + ev.Path) { + logrus.Tracef("WatchStorage: Skipping file %q as it is ignored by the ContentTyper/PathExcluder", ev.Path) + continue + } var err error - switch event.Type { + switch ev.Type { // FileEventModify is also sent for newly-created files - case FileEventModify, FileEventMove: - err = s.handleModifyMove(ctx, event) - case FileEventDelete: - err = s.handleDelete(ctx, event) + case fileevents.FileEventModify, fileevents.FileEventMove: + err = s.handleModifyMove(ctx, ev) + case fileevents.FileEventDelete: + err = s.handleDelete(ctx, ev) default: - err = fmt.Errorf("cannot handle update of type %v for path %q", event.Type, event.Path) + err = fmt.Errorf("cannot handle update of type %v for path %q", ev.Type, ev.Path) } if err != nil { logrus.Errorf("WatchStorage: %v", err) @@ -234,40 +257,40 @@ func (s *GenericUnstructuredEventStorage) monitorFunc() { } } -func (s *GenericUnstructuredEventStorage) handleDelete(ctx context.Context, event *FileEvent) error { +func (s *Generic) handleDelete(ctx context.Context, ev *fileevents.FileEvent) error { // The object is deleted, so we need to do a reverse-lookup of what kind of object // was there earlier, based on the path. This assumes that the filefinder organizes // the known objects in such a way that it is able to do the reverse-lookup. For // mapped FileFinders, by this point the path should still be in the local cache, // which should make us able to get the ID before deleted from the cache. 
- objectID, err := s.fileFinder.ObjectAt(ctx, event.Path) + objectID, err := s.MappedFileFinder().ObjectAt(ctx, ev.Path) if err != nil { - return fmt.Errorf("failed to reverse lookup ID for deleted file %q: %v", event.Path, err) + return fmt.Errorf("failed to reverse lookup ID for deleted file %q: %v", ev.Path, err) } // Remove the mapping from the FileFinder cache for this ID as it's now deleted s.deleteMapping(ctx, objectID) // Send the delete event to the channel - s.sendEvent(storage.ObjectEventDelete, objectID) + s.sendEvent(event.ObjectEventDelete, objectID) return nil } -func (s *GenericUnstructuredEventStorage) handleModifyMove(ctx context.Context, event *FileEvent) error { +func (s *Generic) handleModifyMove(ctx context.Context, ev *fileevents.FileEvent) error { // Read the content of this modified, moved or created file - content, err := s.FileFinder().Filesystem().ReadFile(ctx, event.Path) + content, err := s.FileFinder().Filesystem().ReadFile(ctx, ev.Path) if err != nil { - return fmt.Errorf("could not read %q: %v", event.Path, err) + return fmt.Errorf("could not read %q: %v", ev.Path, err) } // Try to recognize the object - versionedID, err := s.recognizer.ResolveObjectID(ctx, event.Path, content) + versionedID, err := s.ObjectRecognizer().ResolveObjectID(ctx, ev.Path, content) if err != nil { - return fmt.Errorf("did not recognize object at path %q: %v", event.Path, err) + return fmt.Errorf("did not recognize object at path %q: %v", ev.Path, err) } // If the file was just moved around, just overwrite the earlier mapping - if event.Type == FileEventMove { - s.setMapping(ctx, versionedID, event.Path) + if ev.Type == fileevents.FileEventMove { + s.setMapping(ctx, versionedID, ev.Path) // Internal move events are a no-op return nil @@ -278,50 +301,47 @@ func (s *GenericUnstructuredEventStorage) handleModifyMove(ctx context.Context, // TODO: In the future, maybe support multiple files pointing to the same // ObjectID? Case in point here is e.g. 
a Modify event for a known path that // changes the underlying ObjectID. - objectEvent := storage.ObjectEventUpdate + objectEvent := event.ObjectEventUpdate // Set the mapping if it didn't exist before; assume this is a Create event - if _, ok := s.fileFinder.GetMapping(ctx, versionedID); !ok { + if _, ok := s.MappedFileFinder().GetMapping(ctx, versionedID); !ok { // Add a mapping between this object and path. - s.setMapping(ctx, versionedID, event.Path) + s.setMapping(ctx, versionedID, ev.Path) // This is what actually determines if an Object is created, // so update the event to update.ObjectEventCreate here - objectEvent = storage.ObjectEventCreate + objectEvent = event.ObjectEventCreate } // Send the event to the channel s.sendEvent(objectEvent, versionedID) return nil } -func (s *GenericUnstructuredEventStorage) sendEvent(event storage.ObjectEventType, id core.UnversionedObjectID) { - logrus.Tracef("GenericUnstructuredEventStorage: Sending event: %v", event) - s.outbound <- &storage.ObjectEvent{ +func (s *Generic) sendEvent(eventType event.ObjectEventType, id core.UnversionedObjectID) { + logrus.Tracef("Generic: Sending event: %v", eventType) + s.outbound <- &event.ObjectEvent{ ID: id, - Type: event, + Type: eventType, } } // setMapping registers a mapping between the given object and the specified path, if raw is a // MappedRawStorage. 
If a given mapping already exists between this object and some path, it // will be overridden with the specified new path -func (s *GenericUnstructuredEventStorage) setMapping(ctx context.Context, id core.UnversionedObjectID, path string) { - /*oi, err := s.FilesystemStorage.Stat(ctx, id) +func (s *Generic) setMapping(ctx context.Context, id core.UnversionedObjectID, path string) { + // Get the current checksum of the new file + checksum, err := s.MappedFileFinder().Filesystem().Checksum(ctx, path) if err != nil { - logrus.Errorf("WatchStorage: Got error when Stat-ing object with id %v: %v", id, err) + logrus.Errorf("Unexpected error when getting checksum of file %q: %v", path, err) return - }*/ - - // TODO: Support working with other MappedFileFinder users simultaneously, and start populating - // the checksum accordingly, by using Stat like above, but taking into account that there might - // not be a previous mapping, in which case one needs to create that first. - - s.fileFinder.SetMapping(ctx, id, unstructured.ChecksumPath{ - Path: path, - //Checksum: oi.Checksum(), + } + // Register the current state in the cache + s.MappedFileFinder().SetMapping(ctx, id, unstructured.ChecksumPath{ + Path: path, + Checksum: checksum, }) } // deleteMapping removes a mapping a file that doesn't exist -func (s *GenericUnstructuredEventStorage) deleteMapping(ctx context.Context, id core.UnversionedObjectID) { - s.fileFinder.DeleteMapping(ctx, id) +func (s *Generic) deleteMapping(ctx context.Context, id core.UnversionedObjectID) { + s.MappedFileFinder().DeleteMapping(ctx, id) } diff --git a/pkg/storage/filesystem/watch/manifest/manifest.go b/pkg/storage/filesystem/watch/manifest/manifest.go deleted file mode 100644 index 9d0786a6..00000000 --- a/pkg/storage/filesystem/watch/manifest/manifest.go +++ /dev/null @@ -1,35 +0,0 @@ -package manifest - -import ( - "github.com/weaveworks/libgitops/pkg/storage/core" - "github.com/weaveworks/libgitops/pkg/storage/filesystem" - 
"github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured" - "github.com/weaveworks/libgitops/pkg/storage/filesystem/watch" - "github.com/weaveworks/libgitops/pkg/storage/filesystem/watch/inotify" -) - -// NewManifestStorage is a high-level constructor for a generic -// MappedFileFinder and filesystem.Storage, together with a -// inotify FileWatcher; all combined into an UnstructuredEventStorage. -func NewManifestStorage( - dir string, - contentTyper filesystem.ContentTyper, - namespacer core.Namespacer, - recognizer core.ObjectRecognizer, - pathExcluder filesystem.PathExcluder, -) (watch.UnstructuredEventStorage, error) { - fs := filesystem.NewOSFilesystem(dir) - fileFinder := unstructured.NewGenericMappedFileFinder(contentTyper, fs) - fsRaw, err := filesystem.NewGeneric(fileFinder, namespacer) - if err != nil { - return nil, err - } - emitter, err := inotify.NewFileWatcher(dir, &inotify.FileWatcherOptions{ - ContentTyper: contentTyper, - PathExcluder: pathExcluder, - }) - if err != nil { - return nil, err - } - return watch.NewGenericUnstructuredEventStorage(fsRaw, recognizer, emitter, true) -} From 567b5bf9fce57ab168e99f4e8c47481f0424ea0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Sun, 24 Jan 2021 01:27:43 +0200 Subject: [PATCH 060/149] Misc changes: added various comments for future reference, removed unused code, and small code additions. 
--- pkg/serializer/decode.go | 4 ++++ pkg/serializer/frame_reader.go | 3 ++- pkg/serializer/options.go | 3 +++ pkg/serializer/patch.go | 2 ++ pkg/serializer/serializer.go | 27 --------------------------- pkg/storage/core/errors.go | 8 ++++++++ pkg/storage/core/interfaces.go | 1 + pkg/storage/kube/namespaces.go | 2 ++ pkg/storage/objectinfo.go | 29 ----------------------------- pkg/storage/utils.go | 23 +++++++++++++++++++++++ 10 files changed, 45 insertions(+), 57 deletions(-) delete mode 100644 pkg/storage/objectinfo.go create mode 100644 pkg/storage/utils.go diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go index bcc9b280..7aee5af4 100644 --- a/pkg/serializer/decode.go +++ b/pkg/serializer/decode.go @@ -16,6 +16,10 @@ import ( // This is the groupversionkind for the v1.List object var listGVK = metav1.Unversioned.WithKind("List") +// TODO: To think about: should we take in the DecodeOptions at Decode time instead +// as a variadic-sized Option slice? It would probably take caching the *json.Serializer +// and runtime.Decoder for the given options they use, though. + func newDecoder(schemeAndCodec *schemeAndCodec, opts DecodeOptions) Decoder { // Allow both YAML and JSON inputs (JSON is a subset of YAML), and deserialize in strict mode s := json.NewSerializerWithOptions(json.DefaultMetaFactory, schemeAndCodec.scheme, schemeAndCodec.scheme, json.SerializerOptions{ diff --git a/pkg/serializer/frame_reader.go b/pkg/serializer/frame_reader.go index aee975fe..a2ba308d 100644 --- a/pkg/serializer/frame_reader.go +++ b/pkg/serializer/frame_reader.go @@ -182,7 +182,8 @@ func FromBytes(content []byte) ReadCloser { // the specified content type. This avoids overhead if it is known that the // byte array only contains one frame. The given frame is returned in // whole in the first ReadFrame() call, and io.EOF is returned in all future -// invocations. +// invocations. 
This FrameReader works for any ContentType and transparently +// exposes what was given through the ContentType() method. func NewSingleFrameReader(b []byte, ct ContentType) FrameReader { return &singleFrameReader{ ct: ct, diff --git a/pkg/serializer/options.go b/pkg/serializer/options.go index fc1a4547..6f56e8e9 100644 --- a/pkg/serializer/options.go +++ b/pkg/serializer/options.go @@ -2,6 +2,8 @@ package serializer import "github.com/weaveworks/libgitops/pkg/util" +// TODO: Import k8s.io/utils/pointer instead of baking our own ptrutils package. + type EncodeOption interface { ApplyToEncode(*EncodeOptions) } @@ -21,6 +23,7 @@ type EncodeOptions struct { // // Default: 2, i.e. pretty output // TODO: Make this a property of the FrameWriter instead? + // TODO: Use a typed size of the int, e.g. int32? JSONIndent *int // Whether to preserve YAML comments internally. diff --git a/pkg/serializer/patch.go b/pkg/serializer/patch.go index e44e589b..bd580e05 100644 --- a/pkg/serializer/patch.go +++ b/pkg/serializer/patch.go @@ -11,6 +11,8 @@ import ( openapi "k8s.io/kube-openapi/pkg/util/proto" ) +// TODO: Move pkg/util/patch under pkg/serializer? + type Patcher interface { // ApplyOnStruct applies the given patch (JSON-encoded) using the given BytePatcher // (that knows how to operate on that kind of patch type) into obj. diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index 97bc5524..fbbcdd1f 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -8,7 +8,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" k8sserializer "k8s.io/apimachinery/pkg/runtime/serializer" - "sigs.k8s.io/yaml" ) // ContentType specifies a content type for Encoders, Decoders, FrameWriters and FrameReaders @@ -37,34 +36,8 @@ type ContentTyped interface { ContentType() ContentType } -// JSONTransformer is an interface for transforming bytes to JSON from -// a content-type specific implementation. 
-type JSONTransformer interface { - ContentTyped - // TransformToJSON takes bytes of the supported ContentType, and - // returns JSON bytes. - TransformToJSON([]byte) ([]byte, error) -} - -// ContentType implements JSONTransformer -var _ JSONTransformer = ContentType("") - func (ct ContentType) ContentType() ContentType { return ct } -// TransformToJSON takes bytes of the supported ContentType, and -// returns JSON bytes. -func (ct ContentType) TransformToJSON(in []byte) ([]byte, error) { - // If the given content type already is JSON, then we're all good - switch ct { - case ContentTypeJSON: - return in, nil - case ContentTypeYAML: - return yaml.YAMLToJSONStrict(in) - default: - return nil, fmt.Errorf("%w: cannot transform %s to JSON", ErrUnsupportedContentType, ct) - } -} - // Serializer is an interface providing high-level decoding/encoding functionality // for types registered in a *runtime.Scheme type Serializer interface { diff --git a/pkg/storage/core/errors.go b/pkg/storage/core/errors.go index da0c97d7..1c955c00 100644 --- a/pkg/storage/core/errors.go +++ b/pkg/storage/core/errors.go @@ -1,11 +1,19 @@ package core import ( + goerrors "errors" + "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/validation/field" ) +var ( + // ErrNotImplemented can be returned for implementers that do not + // implement a specific part of an interface. + ErrNotImplemented = goerrors.New("not implemented") +) + // StatusError is an error that supports also conversion // to a metav1.Status struct for more detailed information. 
type StatusError interface { diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go index a60670e1..3cc1a3ac 100644 --- a/pkg/storage/core/interfaces.go +++ b/pkg/storage/core/interfaces.go @@ -16,6 +16,7 @@ import ( // GroupVersionKind aliases type GroupKind = schema.GroupKind +type GroupVersion = schema.GroupVersion type GroupVersionKind = schema.GroupVersionKind // Client-related Object aliases diff --git a/pkg/storage/kube/namespaces.go b/pkg/storage/kube/namespaces.go index 8a544571..5643576f 100644 --- a/pkg/storage/kube/namespaces.go +++ b/pkg/storage/kube/namespaces.go @@ -16,6 +16,8 @@ import ( // or FileFinder's objects, and just reads them, converts them into the current // hub version. +// TODO: Make a composite Storage that encrypts secrets using a key + var ( // ErrNoSuchNamespace means that the set of namespaces was searched in the // system, but the requested namespace wasn't in that list. diff --git a/pkg/storage/objectinfo.go b/pkg/storage/objectinfo.go deleted file mode 100644 index 7ddc78cb..00000000 --- a/pkg/storage/objectinfo.go +++ /dev/null @@ -1,29 +0,0 @@ -package storage - -import ( - "github.com/weaveworks/libgitops/pkg/serializer" - "github.com/weaveworks/libgitops/pkg/storage/core" -) - -func NewObjectInfo(ct serializer.ContentType, checksum string, filepath string, id core.UnversionedObjectID) ObjectInfo { - return &objectInfo{ - ct: ct, - checksum: checksum, - filepath: filepath, - id: id, - } -} - -var _ ObjectInfo = &objectInfo{} - -type objectInfo struct { - ct serializer.ContentType - checksum string - filepath string - id core.UnversionedObjectID -} - -func (o *objectInfo) ContentType() serializer.ContentType { return o.ct } -func (o *objectInfo) Checksum() string { return o.checksum } -func (o *objectInfo) Path() string { return o.filepath } -func (o *objectInfo) ID() core.UnversionedObjectID { return o.id } diff --git a/pkg/storage/utils.go b/pkg/storage/utils.go new file mode 100644 index 
00000000..d45323bd --- /dev/null +++ b/pkg/storage/utils.go @@ -0,0 +1,23 @@ +package storage + +import ( + "fmt" + + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +// VerifyNamespaced verifies that the given GroupKind and namespace parameter follows +// the rule of the Namespacer. +func VerifyNamespaced(namespacer core.Namespacer, gk core.GroupKind, ns string) error { + // Get namespacing info + namespaced, err := namespacer.IsNamespaced(gk) + if err != nil { + return err + } + if namespaced && ns == "" { + return fmt.Errorf("%w: namespaced kind %v requires non-empty namespace", ErrNamespacedMismatch, gk) + } else if !namespaced && ns != "" { + return fmt.Errorf("%w: non-namespaced kind %v must not have namespace parameter set", ErrNamespacedMismatch, gk) + } + return nil +} From 701c40cb6adf102817b44ec0263bceb1e6c914fd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 25 Jan 2021 14:53:43 +0200 Subject: [PATCH 061/149] Move the NamespaceEnforcer interface and the sample implementation to the backend. Make the sample implementation more generic, and the Kubernetes implementation a "special configuration" of it. --- pkg/storage/backend/backend.go | 40 ++++------- pkg/storage/backend/enforcer.go | 116 ++++++++++++++++++++++++++++++++ pkg/storage/core/errors.go | 4 ++ pkg/storage/core/interfaces.go | 18 ----- pkg/storage/kube/namespaces.go | 66 +++++++++--------- 5 files changed, 163 insertions(+), 81 deletions(-) create mode 100644 pkg/storage/backend/enforcer.go diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 442698d5..39d769ec 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -20,8 +20,6 @@ var ( // ErrNameRequired is returned when .metadata.name is unset // TODO: Support generateName? 
ErrNameRequired = errors.New(".metadata.name is required") - - namespaceGVK = core.GroupVersionKind{Group: "", Version: "v1", Kind: "Namespace"} ) // TODO: Make a *core.Unknown that has @@ -34,7 +32,7 @@ var ( type Accessors interface { Storage() storage.Storage - NamespaceEnforcer() core.NamespaceEnforcer + NamespaceEnforcer() NamespaceEnforcer Scheme() *runtime.Scheme } @@ -92,7 +90,7 @@ type StorageVersioner interface { func NewGeneric( storage storage.Storage, serializer serializer.Serializer, // TODO: only scheme required, encode/decode optional? - enforcer core.NamespaceEnforcer, + enforcer NamespaceEnforcer, validator Validator, // TODO: optional? versioner StorageVersioner, // TODO: optional? ) (*Generic, error) { @@ -126,7 +124,7 @@ type Generic struct { encoder serializer.Encoder storage storage.Storage - enforcer core.NamespaceEnforcer + enforcer NamespaceEnforcer validator Validator versioner StorageVersioner } @@ -139,7 +137,7 @@ func (b *Generic) Storage() storage.Storage { return b.storage } -func (b *Generic) NamespaceEnforcer() core.NamespaceEnforcer { +func (b *Generic) NamespaceEnforcer() NamespaceEnforcer { return b.enforcer } @@ -316,27 +314,15 @@ func (b *Generic) idForObj(ctx context.Context, obj core.Object) (core.ObjectID, return nil, ErrNameRequired } - // Check if the GroupKind is namespaced - namespaced, err := b.storage.Namespacer().IsNamespaced(gvk.GroupKind()) - if err != nil { - return nil, err - } - - var namespaces sets.String - // If the namespace enforcer requires listing all the other namespaces, - // look them up - if b.enforcer.RequireSetNamespaceExists() { - objIDs, err := b.storage.ListObjectIDs(ctx, namespaceGVK.GroupKind(), "") - if err != nil { - return nil, err - } - namespaces = sets.NewString() - for _, id := range objIDs { - namespaces.Insert(id.ObjectKey().Name) - } - } - // Enforce the given namespace policy. 
This might mutate obj - if err := b.enforcer.EnforceNamespace(obj, namespaced, namespaces); err != nil { + // Enforce the given namespace policy. This might mutate obj. + // TODO: disallow "upcasting" the Lister to a full-blown Storage? + if err := b.enforcer.EnforceNamespace( + ctx, + obj, + gvk, + b.Storage().Namespacer(), + b.Storage(), + ); err != nil { return nil, err } diff --git a/pkg/storage/backend/enforcer.go b/pkg/storage/backend/enforcer.go new file mode 100644 index 00000000..8553283e --- /dev/null +++ b/pkg/storage/backend/enforcer.go @@ -0,0 +1,116 @@ +package backend + +import ( + "context" + "errors" + "fmt" + + "github.com/weaveworks/libgitops/pkg/storage" + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +var ( + // ErrNoSuchNamespace means that the set of namespaces was searched in the + // system, but the requested namespace wasn't in that list. + ErrNoSuchNamespace = errors.New("no such namespace in the system") +) + +// NamespaceEnforcer enforces a namespace policy for the Backend. +type NamespaceEnforcer interface { + // EnforceNamespace makes sure that: + // a) Any namespaced object has a non-empty namespace field after this call + // b) Any non-namespaced object has an empty namespace field after this call + // c) The applicable namespace policy of the user's liking is enforced (e.g. + // that there are only certain valid namespaces that can be used). + // + // This call is allowed to mutate obj. gvk represents the GroupVersionKind + // of obj. The namespacer can be used to figure out if the given object is + // namespaced or not. The given lister might be used to list object IDs, + // or existing namespaces in the system. + // + // See GenericNamespaceEnforcer for an example implementation, or + // pkg/storage/kube.NewNamespaceEnforcer() for a sample application. 
+ EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer core.Namespacer, lister storage.Lister) error +} + +// GenericNamespaceEnforcer is a NamespaceEnforcer that: +// a) sets a default namespace for namespaced objects that have +// the namespace field left empty +// b) makes sure non-namespaced objects do not have the namespace +// field set, by pruning any previously-set value. +// c) if NamespaceGroupKind is non-nil; lists valid Namespace objects +// in the system (of the given GroupKind); and matches namespaced +// objects' namespace field against the listed Namespace objects' +// .metadata.name field. +// +// For an example of how to configure this enforcer in the way +// Kubernetes itself (approximately) does, see pkg/storage/kube. +// NewNamespaceEnforcer(). +type GenericNamespaceEnforcer struct { + // DefaultNamespace describes the default namespace string + // that should be set, if a namespaced object's namespace + // field is empty. + // +required + DefaultNamespace string + // NamespaceGroupKind describes the GroupKind for Namespace + // objects in the system. If non-nil, objects with such + // GroupKind are listed, and their .metadata.name is matched + // against the current object's namespace field. If nil, any + // namespace value is considered valid. + // +optional + NamespaceGroupKind *core.GroupKind +} + +func (e GenericNamespaceEnforcer) EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer core.Namespacer, lister storage.Lister) error { + // Get namespacing info + namespaced, err := namespacer.IsNamespaced(gvk.GroupKind()) + if err != nil { + return err + } + + // Enforce generic rules + ns := obj.GetNamespace() + if !namespaced { + // If a namespace was set, it must be sanitized, as non-namespaced + // resources must have namespace field empty. + if len(ns) != 0 { + obj.SetNamespace("") + } + return nil + } + // The resource is namespaced. 
+ // If it is empty, set it to the default namespace. + if len(ns) == 0 { + // Verify that DefaultNamespace is non-empty + if len(e.DefaultNamespace) == 0 { + return fmt.Errorf("GenericNamespaceEnforcer.DefaultNamespace is mandatory: %w", core.ErrInvalidParameter) + } + // Mutate obj and set the namespace field to the default, then return + obj.SetNamespace(e.DefaultNamespace) + return nil + } + + // If the namespace field is set, but NamespaceGroupKind is + // nil, it means that any non-empty namespace value is + // valid. + if e.NamespaceGroupKind == nil { + return nil + } + + // However, if a Namespace GroupKind was given, look it up using + // the lister, and verify its .metadata.name matches the given + // namespace value. + objIDs, err := lister.ListObjectIDs(ctx, *e.NamespaceGroupKind, "") + if err != nil { + return err + } + // Loop through the IDs, and try to match it against the set ns + for _, id := range objIDs { + if id.ObjectKey().Name == ns { + // Found the namespace; this is a valid setting + return nil + } + } + // The set namespace doesn't belong to the set of valid namespaces, error + return fmt.Errorf("%w: %q", ErrNoSuchNamespace, ns) +} diff --git a/pkg/storage/core/errors.go b/pkg/storage/core/errors.go index 1c955c00..f65895a9 100644 --- a/pkg/storage/core/errors.go +++ b/pkg/storage/core/errors.go @@ -12,6 +12,10 @@ var ( // ErrNotImplemented can be returned for implementers that do not // implement a specific part of an interface. ErrNotImplemented = goerrors.New("not implemented") + // ErrInvalidParameter specifies that a given parameter + // (as a public struct field or function argument) was + // not valid according to the specification. 
+ ErrInvalidParameter = goerrors.New("invalid parameter") ) // StatusError is an error that supports also conversion diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go index 3cc1a3ac..208a6272 100644 --- a/pkg/storage/core/interfaces.go +++ b/pkg/storage/core/interfaces.go @@ -5,7 +5,6 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -36,23 +35,6 @@ type DeleteAllOfOption = client.DeleteAllOfOption // Helper functions from client. var ObjectKeyFromObject = client.ObjectKeyFromObject -// NamespaceEnforcer enforces a namespace policy for the Storage. -type NamespaceEnforcer interface { - // RequireSetNamespaceExists specifies whether the namespace must exist in the system. - // For example, Kubernetes requires this by default. - RequireSetNamespaceExists() bool - // EnforceNamespace operates on the object to make it conform with a given set of rules. - // If RequireNamespaceExists() is true, all the namespaces available in the system must - // be passed to namespaces. - // For example, Kubernetes enforces the following rules: - // Namespaced resources: - // If .metadata.namespace == "": .metadata.namespace = "default" - // If .metadata.namespace != "": Make sure there is such a namespace, and use it in that case - // Non-namespaced resources: - // If .metadata.namespace != "": .metadata.namespace = "" - EnforceNamespace(obj Object, namespaced bool, namespaces sets.String) error -} - // Namespacer is an interface that lets the caller know if a GroupKind is namespaced // or not. There are two ready-made implementations: // 1. 
RESTMapperToNamespacer diff --git a/pkg/storage/kube/namespaces.go b/pkg/storage/kube/namespaces.go index 5643576f..3e509ceb 100644 --- a/pkg/storage/kube/namespaces.go +++ b/pkg/storage/kube/namespaces.go @@ -1,15 +1,13 @@ package kube import ( - "errors" - "fmt" "sync" + "github.com/weaveworks/libgitops/pkg/storage/backend" "github.com/weaveworks/libgitops/pkg/storage/core" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" ) // TODO: Make an example component that iterates through all of a raw.Storage's @@ -18,40 +16,36 @@ import ( // TODO: Make a composite Storage that encrypts secrets using a key -var ( - // ErrNoSuchNamespace means that the set of namespaces was searched in the - // system, but the requested namespace wasn't in that list. - ErrNoSuchNamespace = errors.New("no such namespace in the system") -) - -// NamespaceEnforcer implements core.NamespaceEnforcer similarly to how the -// Kubernetes API server behaves. -type NamespaceEnforcer struct{} - -var _ core.NamespaceEnforcer = NamespaceEnforcer{} - -func (NamespaceEnforcer) RequireSetNamespaceExists() bool { return true } - -func (NamespaceEnforcer) EnforceNamespace(obj core.Object, namespaced bool, namespaces sets.String) error { - ns := obj.GetNamespace() - if !namespaced { - // If a namespace was set, it should be sanitized. - if len(ns) != 0 { - obj.SetNamespace("") - } - return nil - } - // The resource is namespaced. - // If it is empty, set it to the default namespace. - if len(ns) == 0 { - obj.SetNamespace(metav1.NamespaceDefault) - return nil - } - // If the namespace field is set, but it doesn't exist in the set, error - if !namespaces.Has(ns) { - return fmt.Errorf("%w: %q", ErrNoSuchNamespace, ns) +// NewNamespaceEnforcer returns a backend.NamespaceEnforcer that +// enforces namespacing rules (approximately) in the same way as +// Kubernetes itself does. 
The following rules are applied: +// +// if object is namespaced { +// if .metadata.namespace == "" { +// .metadata.namespace = "default" +// } else { // .metadata.namespace != "" +// Make sure that such a v1.Namespace object +// exists in the system. +// } +// } else { // object is non-namespaced +// if .metadata.namespace != "" { +// .metadata.namespace = "" +// } +// } +// +// Underneath, backend.GenericNamespaceEnforcer is used. Refer +// to the documentation of that if you want the functionality +// to be slightly different. (e.g. any namespace value is valid). +// +// TODO: Maybe we want to validate the namespace string itself? +func NewNamespaceEnforcer() backend.NamespaceEnforcer { + return backend.GenericNamespaceEnforcer{ + DefaultNamespace: metav1.NamespaceDefault, + NamespaceGroupKind: &core.GroupKind{ + Group: "", // legacy name for the core API group + Kind: "Namespace", + }, } - return nil } // SimpleRESTMapper is a subset of the meta.RESTMapper interface From 82cb55173a27597faf8c4ff12812a399cb9f2e3f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 26 Jan 2021 01:26:24 +0200 Subject: [PATCH 062/149] Minor bugfixes and improvements --- pkg/storage/filesystem/unstructured/event/storage.go | 8 +++++--- pkg/storage/filesystem/unstructured/filefinder_mapped.go | 4 ++-- pkg/storage/filesystem/unstructured/storage.go | 2 +- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index 59ea6cee..57936bc2 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -290,6 +290,8 @@ func (s *Generic) handleModifyMove(ctx context.Context, ev *fileevents.FileEvent // If the file was just moved around, just overwrite the earlier mapping if ev.Type == fileevents.FileEventMove { + // This assumes that the file content does not change in the move + // 
operation. TODO: document this as a requirement for the Emitter. s.setMapping(ctx, versionedID, ev.Path) // Internal move events are a no-op @@ -304,13 +306,13 @@ func (s *Generic) handleModifyMove(ctx context.Context, ev *fileevents.FileEvent objectEvent := event.ObjectEventUpdate // Set the mapping if it didn't exist before; assume this is a Create event if _, ok := s.MappedFileFinder().GetMapping(ctx, versionedID); !ok { - // Add a mapping between this object and path. - s.setMapping(ctx, versionedID, ev.Path) - // This is what actually determines if an Object is created, // so update the event to update.ObjectEventCreate here objectEvent = event.ObjectEventCreate } + // Update the mapping between this object and path (this updates + // the checksum underneath too). + s.setMapping(ctx, versionedID, ev.Path) // Send the event to the channel s.sendEvent(objectEvent, versionedID) return nil diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go index 474919fa..274da22f 100644 --- a/pkg/storage/filesystem/unstructured/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -4,9 +4,9 @@ import ( "context" "errors" - "github.com/fluxcd/go-git-providers/validation" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" + utilerrs "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" ) @@ -66,7 +66,7 @@ func (f *GenericMappedFileFinder) ObjectPath(ctx context.Context, id core.Unvers cp, ok := f.GetMapping(ctx, id) if !ok { // TODO: separate interface for "new creates"? 
- return "", &validation.MultiError{Errors: []error{ErrNotTracked, core.NewErrNotFound(id)}} + return "", utilerrs.NewAggregate([]error{ErrNotTracked, core.NewErrNotFound(id)}) } return cp.Path, nil } diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go index f753ae8f..91097345 100644 --- a/pkg/storage/filesystem/unstructured/storage.go +++ b/pkg/storage/filesystem/unstructured/storage.go @@ -46,7 +46,7 @@ func (s *Generic) Sync(ctx context.Context) ([]ChecksumPathID, error) { ctx, fileFinder.Filesystem(), fileFinder.ContentTyper(), - s.pathExcluder, + s.PathExcluder(), ) if err != nil { return nil, err From 9e5262c94d600df9069b36eaa42a02e14e279902 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 26 Jan 2021 01:28:17 +0200 Subject: [PATCH 063/149] Make pathexcluder explicitely work without a filesystem abstraction; after all it should filter paths only, not files. --- pkg/storage/filesystem/dir_traversal.go | 2 +- pkg/storage/filesystem/path_excluder.go | 4 ++-- pkg/storage/filesystem/path_excluder_test.go | 2 +- pkg/storage/filesystem/unstructured/event/storage.go | 4 +++- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/storage/filesystem/dir_traversal.go b/pkg/storage/filesystem/dir_traversal.go index 6658292a..59668765 100644 --- a/pkg/storage/filesystem/dir_traversal.go +++ b/pkg/storage/filesystem/dir_traversal.go @@ -27,7 +27,7 @@ func ListValidFilesInFilesystem(ctx context.Context, fs Filesystem, contentTyper // that contentTyper recognizes, and is not a path that is excluded by pathExcluder. 
func IsValidFileInFilesystem(ctx context.Context, fs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder, file string) bool { // return false if this path should be excluded - if pathExcluder.ShouldExcludePath(ctx, fs, file) { + if pathExcluder.ShouldExcludePath(ctx, file) { return false } diff --git a/pkg/storage/filesystem/path_excluder.go b/pkg/storage/filesystem/path_excluder.go index 9c4f3c03..cb358648 100644 --- a/pkg/storage/filesystem/path_excluder.go +++ b/pkg/storage/filesystem/path_excluder.go @@ -13,7 +13,7 @@ type PathExcluder interface { // ShouldExcludePath takes in a context, the fs filesystem abstraction, // and a relative path to the file which should be determined if it should // be excluded or not. - ShouldExcludePath(ctx context.Context, fs Filesystem, path string) bool + ShouldExcludePath(ctx context.Context, path string) bool } // ExcludeGitDirectory implements PathExcluder. @@ -23,7 +23,7 @@ var _ PathExcluder = ExcludeGitDirectory{} // all files under a ".git" directory, anywhere in the tree under the root directory. 
type ExcludeGitDirectory struct{} -func (ExcludeGitDirectory) ShouldExcludePath(_ context.Context, _ Filesystem, path string) bool { +func (ExcludeGitDirectory) ShouldExcludePath(_ context.Context, path string) bool { // Always start from a clean path path = filepath.Clean(path) for { diff --git a/pkg/storage/filesystem/path_excluder_test.go b/pkg/storage/filesystem/path_excluder_test.go index d253dbdf..46c7c039 100644 --- a/pkg/storage/filesystem/path_excluder_test.go +++ b/pkg/storage/filesystem/path_excluder_test.go @@ -61,7 +61,7 @@ func TestExcludeGitDirectory_ShouldExcludePath(t *testing.T) { ctx := context.Background() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := e.ShouldExcludePath(ctx, nil, tt.path); got != tt.want { + if got := e.ShouldExcludePath(ctx, tt.path); got != tt.want { t.Errorf("ExcludeGitDirectory.ShouldExcludePath() = %v, want %v", got, tt.want) } }) diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index 57936bc2..ef64dec8 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -44,7 +44,9 @@ func NewManifest( if err != nil { return nil, err } - emitter, err := inotify.NewFileWatcher(dir) + emitter, err := inotify.NewFileWatcher(dir, &inotify.FileWatcherOptions{ + PathExcluder: pathExcluder, + }) if err != nil { return nil, err } From 38c5d707de57e8395485d79567b098fcc4de0c3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 26 Jan 2021 01:29:03 +0200 Subject: [PATCH 064/149] Add the PathExcluder back to FileWatcher, to avoid putting a lot of burden on it watching git files fly around. 
--- pkg/storage/filesystem/fileevents/inotify/filewatcher.go | 6 ++++++ pkg/storage/filesystem/fileevents/inotify/options.go | 9 +++++++++ 2 files changed, 15 insertions(+) diff --git a/pkg/storage/filesystem/fileevents/inotify/filewatcher.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go index 3a7171da..4c0f8b13 100644 --- a/pkg/storage/filesystem/fileevents/inotify/filewatcher.go +++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go @@ -117,6 +117,8 @@ func (w *FileWatcher) monitorFunc() { defer log.Debug("FileWatcher: Monitoring thread stopped") defer close(w.outbound) // Close the update stream after the FileWatcher has stopped + ctx := context.Background() + for { event, ok := <-w.inbound if !ok { @@ -127,6 +129,10 @@ func (w *FileWatcher) monitorFunc() { continue // Skip directories } + if w.opts.PathExcluder.ShouldExcludePath(ctx, event.Path()) { + continue // Skip ignored files + } + // Get any events registered for the specific file, and append the specified event var eventList notifyEvents if val, ok := w.batcher.Load(event.Path()); ok { diff --git a/pkg/storage/filesystem/fileevents/inotify/options.go b/pkg/storage/filesystem/fileevents/inotify/options.go index dbf52e95..d724e75c 100644 --- a/pkg/storage/filesystem/fileevents/inotify/options.go +++ b/pkg/storage/filesystem/fileevents/inotify/options.go @@ -2,6 +2,8 @@ package inotify import ( "time" + + "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) // How many inotify events we can buffer before watching is interrupted @@ -23,6 +25,9 @@ type FileWatcherOptions struct { // before watching is interrupted/delayed. // Default: DefaultEventBufferSize EventBufferSize int32 + // PathExcluder provides a way to exclude paths. 
+ // Default: filesystem.ExcludeGitDirectory{} + PathExcluder filesystem.PathExcluder } func (o *FileWatcherOptions) ApplyToFileWatcher(target *FileWatcherOptions) { @@ -32,6 +37,9 @@ func (o *FileWatcherOptions) ApplyToFileWatcher(target *FileWatcherOptions) { if o.EventBufferSize != 0 { target.EventBufferSize = o.EventBufferSize } + if o.PathExcluder != nil { + target.PathExcluder = o.PathExcluder + } } func (o *FileWatcherOptions) ApplyOptions(opts []FileWatcherOption) *FileWatcherOptions { @@ -46,5 +54,6 @@ func defaultOptions() *FileWatcherOptions { return &FileWatcherOptions{ BatchTimeout: 1 * time.Second, EventBufferSize: DefaultEventBufferSize, + PathExcluder: filesystem.ExcludeGitDirectory{}, } } From b04de273d9d9c2c777599a618cd49022bb5c283b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 26 Jan 2021 18:16:39 +0200 Subject: [PATCH 065/149] Check in the first revision of the transactional client. --- pkg/storage/client/transactional/client.go | 322 ++++++++++++++++++ pkg/storage/client/transactional/commit.go | 126 +++++++ pkg/storage/client/transactional/handlers.go | 75 ++++ .../client/transactional/interfaces.go | 80 +++++ pkg/storage/client/transactional/options.go | 66 ++++ pkg/storage/client/transactional/tx.go | 24 ++ pkg/storage/client/transactional/tx_branch.go | 71 ++++ pkg/storage/client/transactional/tx_common.go | 76 +++++ pkg/storage/client/transactional/tx_ops.go | 105 ++++++ pkg/storage/client/transactional/utils.go | 21 ++ 10 files changed, 966 insertions(+) create mode 100644 pkg/storage/client/transactional/client.go create mode 100644 pkg/storage/client/transactional/commit.go create mode 100644 pkg/storage/client/transactional/handlers.go create mode 100644 pkg/storage/client/transactional/interfaces.go create mode 100644 pkg/storage/client/transactional/options.go create mode 100644 pkg/storage/client/transactional/tx.go create mode 100644 pkg/storage/client/transactional/tx_branch.go create mode 
100644 pkg/storage/client/transactional/tx_common.go create mode 100644 pkg/storage/client/transactional/tx_ops.go create mode 100644 pkg/storage/client/transactional/utils.go diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go new file mode 100644 index 00000000..f0c716a9 --- /dev/null +++ b/pkg/storage/client/transactional/client.go @@ -0,0 +1,322 @@ +package transactional + +import ( + "context" + "fmt" + "strings" + "sync" + "sync/atomic" + + "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/storage/backend" + "github.com/weaveworks/libgitops/pkg/storage/client" + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/util" + utilerrs "k8s.io/apimachinery/pkg/util/errors" +) + +var _ Client = &Generic{} + +func NewGeneric(c client.Client, manager BranchManager, merger BranchMerger) (Client, error) { + if c == nil { + return nil, fmt.Errorf("%w: c is required", core.ErrInvalidParameter) + } + if manager == nil { + return nil, fmt.Errorf("%w: manager is required", core.ErrInvalidParameter) + } + return &Generic{ + c: c, + txs: make(map[string]*txLock), + txsMu: &sync.Mutex{}, + manager: manager, + merger: merger, + }, nil +} + +type Generic struct { + c client.Client + + txs map[string]*txLock + txsMu *sync.Mutex + + // +optional + merger BranchMerger + // +required + manager BranchManager +} + +type txLock struct { + mu *sync.RWMutex + mode TxMode + // active == 1 means "transaction active, mu is locked for writing" + // active == 0 means "transaction has stopped, mu has been unlocked" + active uint32 +} + +func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { + return c.lockForReading(ctx, func() error { + return c.c.Get(ctx, key, obj) + }) +} + +func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...core.ListOption) error { + return c.lockForReading(ctx, func() error { + return c.c.List(ctx, list, opts...) 
+ }) +} + +func (c *Generic) lockForReading(ctx context.Context, operation func() error) error { + ref := core.GetVersionRef(ctx) + switch ref.Type() { + case core.VersionRefTypeCommit: + // Never block reads for specific commits + return operation() + case core.VersionRefTypeBranch: + return c.readBranch(ref.String(), operation) + default: + return fmt.Errorf("%w: %s", core.ErrInvalidVersionRefType, ref.Type()) + } +} + +func (c *Generic) readBranch(branch string, callback func() error) error { + // Acquire the tx-specific lock + c.txsMu.Lock() + txState, ok := c.txs[branch] + if !ok { + // grow the txs map by one + c.txs[branch] = &txLock{ + mu: &sync.RWMutex{}, + } + txState = c.txs[branch] + } + c.txsMu.Unlock() + // During this period, no transactions can be started, + // only reads can be active + if txState.mode == TxModeAtomic { + txState.mu.RLock() + } + err := callback() + if txState.mode == TxModeAtomic { + txState.mu.RUnlock() + } + return err +} + +func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txFunc) { + // Acquire the tx-specific lock + c.txsMu.Lock() + txState, ok := c.txs[info.Head] + if !ok { + // grow the txs map by one + c.txs[info.Head] = &txLock{ + mu: &sync.RWMutex{}, + } + txState = c.txs[info.Head] + } + txState.mode = info.Options.Mode + c.txsMu.Unlock() + + // Wait for all reads to complete (in the case of the atomic mode), + // and then lock for writing. For non-atomic mode this uses the mutex + // as it is modifying txState, and two transactions must not run at + // the same time for the same branch. + // + // Always lock mu when a transaction is running on this branch, + // regardless of mode. If atomic mode is enabled, this also waits + // on any reads happening at this moment. For all modes, this ensures + // transactions happen in order. 
+ txState.mu.Lock() + txState.active = 1 // set tx state to "active" + + // Create a child context with a timeout + dlCtx, cleanupTimeout := context.WithTimeout(ctx, info.Options.Timeout) + + // This function cleans up the transaction, and unlocks the tx mutex + cleanupFunc := func() error { + // Cleanup after the transaction + if err := c.cleanupAfterTx(ctx, &info); err != nil { + return fmt.Errorf("Failed to cleanup branch %s after tx: %v", info.Head, err) + } + // Unlock the mutex so new transactions can take place on this branch + txState.mu.Unlock() + return nil + } + + // Start waiting for the cancellation of the deadline context. + go func() { + // Wait for the context to either timeout or be cancelled + <-dlCtx.Done() + // This guard makes sure the cleanup function runs exactly + // once, regardless of transaction end cause. + if atomic.CompareAndSwapUint32(&txState.active, 1, 0) { + if err := cleanupFunc(); err != nil { + logrus.Errorf("Failed to cleanup after tx timeout: %v", err) + } + } + }() + + abortFunc := func() error { + // The transaction ended; the caller is either Abort() or + // at the end of a successful transaction. The cause of + // Abort() happening can also be a context cancellation. + // If the parent context was cancelled or timed out; this + // function and the above function race to set active => 0 + // Regardless, due to the atomic nature of the operation, + // cleanupFunc() will only be run once. + if atomic.CompareAndSwapUint32(&txState.active, 1, 0) { + // We can now stop the timeout timer + cleanupTimeout() + // Clean up the transaction + return cleanupFunc() + } + return nil + } + + return dlCtx, abortFunc +} + +func (c *Generic) cleanupAfterTx(ctx context.Context, info *TxInfo) error { + // Always run both the "clean branch" command... + errs := []error{c.manager.ResetToCleanBranch(ctx, info.Base)} + if c.manager.TransactionHandler() != nil { + // ... 
and the post-transaction command + // TODO: should this be in its own goroutine to switch back to main + // ASAP? + errs = append(errs, + c.manager.TransactionHandler().HandlePostTransaction(ctx, *info)) + } + // Return an aggregate error + return utilerrs.NewAggregate(errs) +} + +func (c *Generic) BackendReader() backend.Reader { + return c.c.BackendReader() +} + +func (c *Generic) BranchMerger() BranchMerger { + return c.merger +} + +func (c *Generic) BranchManager() BranchManager { + return c.manager +} + +func (c *Generic) Transaction(ctx context.Context, opts ...TxOption) Tx { + tx, err := c.transaction(ctx, opts...) + if err != nil { + panic(err) + } + return tx +} + +func (c *Generic) BranchTransaction(ctx context.Context, headBranch string, opts ...TxOption) BranchTx { + tx, err := c.branchTransaction(ctx, headBranch, opts...) + if err != nil { + panic(err) + } + return tx +} + +func (c *Generic) validateCtx(ctx context.Context) (core.VersionRef, error) { + // Check so versionref isn't set here + ref := core.GetVersionRef(ctx) + switch ref.Type() { + case core.VersionRefTypeCommit: + return nil, fmt.Errorf("must not give a VersionRef of type Commit to (Branch)Transaction()") + case core.VersionRefTypeBranch: + return ref, nil + default: + return nil, fmt.Errorf("%w: %s", core.ErrInvalidVersionRefType, ref.Type()) + } +} + +func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) { + // Validate the versionref from the context + ref, err := c.validateCtx(ctx) + if err != nil { + return nil, err + } + + // Parse options + o := defaultTxOptions().ApplyOptions(opts) + + branch := ref.String() + info := TxInfo{ + Base: branch, + Head: branch, + Options: *o, + } + // Initialize the transaction + ctxWithDeadline, cleanupFunc := c.initTx(ctx, info) + + // Run pre-tx checks + err = c.manager.TransactionHandler().HandlePreTransaction(ctxWithDeadline, info) + + return &txImpl{ + &txCommon{ + err: err, + c: c.c, + manager: c.manager, + ctx: 
ctxWithDeadline, + info: info, + cleanupFunc: cleanupFunc, + }, + }, nil +} + +func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts ...TxOption) (BranchTx, error) { + // Validate the versionref from the context + ref, err := c.validateCtx(ctx) + if err != nil { + return nil, err + } + baseBranch := ref.String() + + // Append random bytes to the end of the head branch if it ends with a dash + if strings.HasSuffix(headBranch, "-") { + suffix, err := util.RandomSHA(4) + if err != nil { + return nil, err + } + headBranch += suffix + } + + // Validate that the base and head branches are distinct + if baseBranch == headBranch { + return nil, fmt.Errorf("head and target branches must not be the same") + } + + logrus.Debugf("Base branch: %q. Head branch: %q.", baseBranch, headBranch) + + // Parse options + o := defaultTxOptions().ApplyOptions(opts) + + info := TxInfo{ + Base: baseBranch, + Head: headBranch, + Options: *o, + } + + // Register the head branch with the context + ctxWithHeadBranch := core.WithVersionRef(ctx, core.NewBranchRef(headBranch)) + // Initialize the transaction + ctxWithDeadline, cleanupFunc := c.initTx(ctxWithHeadBranch, info) + + // Run pre-tx checks and create the new branch + err = utilerrs.NewAggregate([]error{ + c.manager.TransactionHandler().HandlePreTransaction(ctxWithDeadline, info), + c.manager.CreateBranch(ctxWithDeadline, headBranch), + }) + + return &txBranchImpl{ + txCommon: &txCommon{ + err: err, + c: c.c, + manager: c.manager, + ctx: ctxWithDeadline, + info: info, + cleanupFunc: cleanupFunc, + }, + merger: c.merger, + }, nil +} diff --git a/pkg/storage/client/transactional/commit.go b/pkg/storage/client/transactional/commit.go new file mode 100644 index 00000000..eeb5e9fa --- /dev/null +++ b/pkg/storage/client/transactional/commit.go @@ -0,0 +1,126 @@ +package transactional + +import ( + "fmt" + + "github.com/fluxcd/go-git-providers/validation" +) + +// Commit describes a result of a transaction. 
+type Commit interface { + // GetAuthor describes the author of this commit. + // +required + GetAuthor() CommitAuthor + // GetMessage describes the change in this commit. + // +required + GetMessage() CommitMessage + // Validate validates that all required fields are set, and given data is valid. + Validate() error +} + +type CommitAuthor interface { + // GetName describes the author's name (e.g. as per git config) + // +required + GetName() string + // GetEmail describes the author's email (e.g. as per git config). + // It is optional generally, but might be required by some specific + // implementations. + // +optional + GetEmail() string + // The String() method must return a (ideally both human- and machine- + // readable) concatenated string including the name and email (if + // applicable) of the author. + fmt.Stringer +} + +type CommitMessage interface { + // GetTitle describes the change concisely, so it can be used e.g. as + // a commit message or PR title. Certain implementations might enforce + // character limits on this string. + // +required + GetTitle() string + // GetDescription contains optional extra, more detailed information + // about the change. + // +optional + GetDescription() string + // The String() method must return a (ideally both human- and machine- + // readable) concatenated string including the title and description + // (if applicable) of the author. + fmt.Stringer +} + +// GenericCommitResult implements Commit. +var _ Commit = GenericCommit{} + +// GenericCommit implements Commit. +type GenericCommit struct { + // GetAuthor describes the author of this commit. + // +required + Author CommitAuthor + // GetMessage describes the change in this commit. 
+ // +required + Message CommitMessage +} + +func (r GenericCommit) GetAuthor() CommitAuthor { return r.Author } +func (r GenericCommit) GetMessage() CommitMessage { return r.Message } + +func (r GenericCommit) Validate() error { + v := validation.New("GenericCommit") + if len(r.Author.GetName()) == 0 { + v.Required("Author.GetName") + } + if len(r.Message.GetTitle()) == 0 { + v.Required("Message.GetTitle") + } + return v.Error() +} + +// GenericCommitAuthor implements CommitAuthor. +var _ CommitAuthor = GenericCommitAuthor{} + +// GenericCommit implements Commit. +type GenericCommitAuthor struct { + // Name describes the author's name (as per git config) + // +required + Name string + // Email describes the author's email (as per git config) + // +optional + Email string +} + +func (r GenericCommitAuthor) GetName() string { return r.Name } +func (r GenericCommitAuthor) GetEmail() string { return r.Email } + +func (r GenericCommitAuthor) String() string { + if len(r.Email) != 0 { + return fmt.Sprintf("%s <%s>", r.Name, r.Email) + } + return r.Name +} + +// GenericCommitMessage implements CommitMessage. +var _ CommitMessage = GenericCommitMessage{} + +// GenericCommitMessage implements CommitMessage. +type GenericCommitMessage struct { + // Title describes the change concisely, so it can be used e.g. as + // a commit message or PR title. Certain implementations might enforce + // character limits on this string. + // +required + Title string + // Description contains optional extra, more detailed information + // about the change. 
+ // +optional + Description string +} + +func (r GenericCommitMessage) GetTitle() string { return r.Title } +func (r GenericCommitMessage) GetDescription() string { return r.Description } + +func (r GenericCommitMessage) String() string { + if len(r.Description) != 0 { + return fmt.Sprintf("%s\n\n%s", r.Title, r.Description) + } + return r.Title +} diff --git a/pkg/storage/client/transactional/handlers.go b/pkg/storage/client/transactional/handlers.go new file mode 100644 index 00000000..570a1739 --- /dev/null +++ b/pkg/storage/client/transactional/handlers.go @@ -0,0 +1,75 @@ +package transactional + +import "context" + +type TxInfo struct { + Base string + Head string + Options TxOptions +} + +type CommitHandler interface { + HandlePreCommit(ctx context.Context, commit Commit, info TxInfo) error + HandlePostCommit(ctx context.Context, commit Commit, info TxInfo) error +} + +type MultiCommitHandler struct { + CommitHandlers []CommitHandler +} + +func (m *MultiCommitHandler) HandlePreCommit(ctx context.Context, commit Commit, info TxInfo) error { + for _, ch := range m.CommitHandlers { + if ch == nil { + continue + } + if err := ch.HandlePreCommit(ctx, commit, info); err != nil { + return err + } + } + return nil +} + +func (m *MultiCommitHandler) HandlePostCommit(ctx context.Context, commit Commit, info TxInfo) error { + for _, ch := range m.CommitHandlers { + if ch == nil { + continue + } + if err := ch.HandlePostCommit(ctx, commit, info); err != nil { + return err + } + } + return nil +} + +type TransactionHandler interface { + HandlePreTransaction(ctx context.Context, info TxInfo) error + HandlePostTransaction(ctx context.Context, info TxInfo) error +} + +type MultiTransactionHandler struct { + TransactionHandlers []TransactionHandler +} + +func (m *MultiTransactionHandler) HandlePreTransaction(ctx context.Context, info TxInfo) error { + for _, th := range m.TransactionHandlers { + if th == nil { + continue + } + if err := th.HandlePreTransaction(ctx, info); 
err != nil { + return err + } + } + return nil +} + +func (m *MultiTransactionHandler) HandlePostTransaction(ctx context.Context, info TxInfo) error { + for _, th := range m.TransactionHandlers { + if th == nil { + continue + } + if err := th.HandlePostTransaction(ctx, info); err != nil { + return err + } + } + return nil +} diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go new file mode 100644 index 00000000..f99fdab3 --- /dev/null +++ b/pkg/storage/client/transactional/interfaces.go @@ -0,0 +1,80 @@ +package transactional + +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/storage/client" + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +type Client interface { + client.Reader + + BranchManager() BranchManager + BranchMerger() BranchMerger + + Transaction(ctx context.Context, opts ...TxOption) Tx + BranchTransaction(ctx context.Context, branchName string, opts ...TxOption) BranchTx +} + +type BranchManager interface { + CreateBranch(ctx context.Context, branch string) error + ResetToCleanBranch(ctx context.Context, branch string) error + Commit(ctx context.Context, commit Commit) error + + CommitHandler() CommitHandler + TransactionHandler() TransactionHandler +} + +type BranchMerger interface { + MergeBranches(ctx context.Context, base, head string, commit Commit) error +} + +type CustomTxFunc func(ctx context.Context) error + +type Tx interface { + Commit(Commit) error + Abort(err error) error + + Client() client.Client + + Custom(CustomTxFunc) Tx + + Get(key core.ObjectKey, obj core.Object) Tx + List(list core.ObjectList, opts ...core.ListOption) Tx + + Create(obj core.Object, opts ...core.CreateOption) Tx + Update(obj core.Object, opts ...core.UpdateOption) Tx + Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx + Delete(obj core.Object, opts ...core.DeleteOption) Tx + DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) Tx + + UpdateStatus(obj 
core.Object, opts ...core.UpdateOption) Tx + PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx +} + +type BranchTx interface { + CreateTx(Commit) BranchTxResult + Abort(err error) error + + Client() client.Client + + Custom(CustomTxFunc) BranchTx + + Get(key core.ObjectKey, obj core.Object) BranchTx + List(list core.ObjectList, opts ...core.ListOption) BranchTx + + Create(obj core.Object, opts ...core.CreateOption) BranchTx + Update(obj core.Object, opts ...core.UpdateOption) BranchTx + Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx + Delete(obj core.Object, opts ...core.DeleteOption) BranchTx + DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) BranchTx + + UpdateStatus(obj core.Object, opts ...core.UpdateOption) BranchTx + PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx +} + +type BranchTxResult interface { + Error() error + MergeWithBase(Commit) error +} diff --git a/pkg/storage/client/transactional/options.go b/pkg/storage/client/transactional/options.go new file mode 100644 index 00000000..6b3679c2 --- /dev/null +++ b/pkg/storage/client/transactional/options.go @@ -0,0 +1,66 @@ +package transactional + +import "time" + +type TxOption interface { + ApplyToTx(*TxOptions) +} + +var _ TxOption = &TxOptions{} + +func defaultTxOptions() *TxOptions { + return &TxOptions{ + Timeout: 1 * time.Minute, + Mode: TxModeAtomic, + } +} + +type TxOptions struct { + Timeout time.Duration + Mode TxMode +} + +func (o *TxOptions) ApplyToTx(target *TxOptions) { + if o.Timeout != 0 { + target.Timeout = o.Timeout + } + if len(o.Mode) != 0 { + target.Mode = o.Mode + } +} + +func (o *TxOptions) ApplyOptions(opts []TxOption) *TxOptions { + for _, opt := range opts { + opt.ApplyToTx(o) + } + return o +} + +var _ TxOption = TxMode("") + +type TxMode string + +const ( + // TxModeAtomic makes the transaction fully atomic, i.e. 
so + // that any read happening against the target branch during the + // lifetime of the transaction will be blocked until the completition + // of the transaction. + TxModeAtomic TxMode = "Atomic" + // TxModeAllowReading will allow reads targeting the given + // branch a transaction is executing against; but before the + // transaction has completed all reads will strictly return + // the data available prior to the transaction taking place. + TxModeAllowReading TxMode = "AllowReading" +) + +func (m TxMode) ApplyToTx(target *TxOptions) { + target.Mode = m +} + +var _ TxOption = TxTimeout(0) + +type TxTimeout time.Duration + +func (t TxTimeout) ApplyToTx(target *TxOptions) { + target.Timeout = time.Duration(t) +} diff --git a/pkg/storage/client/transactional/tx.go b/pkg/storage/client/transactional/tx.go new file mode 100644 index 00000000..30c6b6cd --- /dev/null +++ b/pkg/storage/client/transactional/tx.go @@ -0,0 +1,24 @@ +package transactional + +type txImpl struct { + *txCommon +} + +func (tx *txImpl) Commit(c Commit) error { + // Run the operations, and try to create the commit + if err := tx.tryApplyAndCommitOperations(c); err != nil { + // If we failed with the transaction, abort directly + return tx.Abort(err) + } + + // We successfully completed all the tasks needed + // Now, cleanup and unlock the branch + return tx.cleanupFunc() +} + +func (tx *txImpl) Custom(op CustomTxFunc) Tx { + tx.ops = append(tx.ops, func() error { + return op(tx.ctx) + }) + return tx +} diff --git a/pkg/storage/client/transactional/tx_branch.go b/pkg/storage/client/transactional/tx_branch.go new file mode 100644 index 00000000..c7011a36 --- /dev/null +++ b/pkg/storage/client/transactional/tx_branch.go @@ -0,0 +1,71 @@ +package transactional + +import ( + "context" + "fmt" +) + +type txBranchImpl struct { + *txCommon + + merger BranchMerger +} + +func (tx *txBranchImpl) CreateTx(c Commit) BranchTxResult { + // Run the operations, and try to create the commit + if err := 
tx.tryApplyAndCommitOperations(c); err != nil { + // If we failed with the transaction, abort directly, and + // return the error wrapped in a BranchTxResult + abortErr := tx.Abort(err) + return newErrTxResult(abortErr) + } + + // We successfully completed all the tasks needed + // Now, cleanup and unlock the branch + cleanupErr := tx.cleanupFunc() + + // Allow the merger to merge, if supported + return &txResultImpl{ + err: cleanupErr, + ctx: tx.ctx, + merger: tx.merger, + baseBranch: tx.info.Base, + headBranch: tx.info.Head, + } +} + +func (tx *txBranchImpl) Custom(op CustomTxFunc) BranchTx { + tx.ops = append(tx.ops, func() error { + return op(tx.ctx) + }) + return tx +} + +func newErrTxResult(err error) *txResultImpl { + return &txResultImpl{err: err} +} + +type txResultImpl struct { + err error + ctx context.Context + merger BranchMerger + baseBranch string + headBranch string +} + +func (r *txResultImpl) Error() error { + return r.err +} + +func (r *txResultImpl) MergeWithBase(c Commit) error { + // If there is an internal error, return it + if r.err != nil { + return r.err + } + // Make sure we have a merger + if r.merger == nil { + return fmt.Errorf("TxResult: The BranchMerger is nil") + } + // Try to merge the branch + return r.merger.MergeBranches(r.ctx, r.baseBranch, r.headBranch, c) +} diff --git a/pkg/storage/client/transactional/tx_common.go b/pkg/storage/client/transactional/tx_common.go new file mode 100644 index 00000000..e4111725 --- /dev/null +++ b/pkg/storage/client/transactional/tx_common.go @@ -0,0 +1,76 @@ +package transactional + +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/storage/client" + utilerrs "k8s.io/apimachinery/pkg/util/errors" +) + +type txFunc func() error + +type txCommon struct { + err error + c client.Client + manager BranchManager + ctx context.Context + ops []txFunc + info TxInfo + cleanupFunc txFunc +} + +func (tx *txCommon) Client() client.Client { + return tx.c +} + +func (tx *txCommon) Abort(err error) 
error { + // Run the cleanup function and return an aggregate of the two possible errors + return utilerrs.NewAggregate([]error{ + err, + tx.cleanupFunc(), + }) +} + +func (tx *txCommon) handlePreCommit(c Commit) txFunc { + return func() error { + if tx.manager.CommitHandler() == nil { + return nil + } + return tx.manager.CommitHandler().HandlePreCommit(tx.ctx, c, tx.info) + } +} + +func (tx *txCommon) commit(c Commit) txFunc { + return func() error { + return tx.manager.Commit(tx.ctx, c) + } +} + +func (tx *txCommon) handlePostCommit(c Commit) txFunc { + return func() error { + if tx.manager.CommitHandler() == nil { + return nil + } + return tx.manager.CommitHandler().HandlePostCommit(tx.ctx, c, tx.info) + } +} + +func (tx *txCommon) tryApplyAndCommitOperations(c Commit) error { + // If an error occurred already before, just return it directly + if tx.err != nil { + return tx.err + } + + // First, all registered client operations are run + // Then Pre-commit, commit, and post-commit functions are run + // If at any stage the context is cancelled, an error is returned + // immediately, and no more functions in the chain are run. The + // same goes for errors from any of the functions, the chain is + // immediately interrupted on errors. 
+ return execTransactionsCtx(tx.ctx, append( + tx.ops, + tx.handlePreCommit(c), + tx.commit(c), + tx.handlePostCommit(c), + )) +} diff --git a/pkg/storage/client/transactional/tx_ops.go b/pkg/storage/client/transactional/tx_ops.go new file mode 100644 index 00000000..e0a6c375 --- /dev/null +++ b/pkg/storage/client/transactional/tx_ops.go @@ -0,0 +1,105 @@ +package transactional + +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/storage/core" +) + +func (tx *txImpl) Get(key core.ObjectKey, obj core.Object) Tx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.Get(ctx, key, obj) + }) +} +func (tx *txImpl) List(list core.ObjectList, opts ...core.ListOption) Tx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.List(ctx, list, opts...) + }) +} + +func (tx *txImpl) Create(obj core.Object, opts ...core.CreateOption) Tx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.Create(ctx, obj, opts...) + }) +} +func (tx *txImpl) Update(obj core.Object, opts ...core.UpdateOption) Tx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.Update(ctx, obj, opts...) + }) +} +func (tx *txImpl) Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.Patch(ctx, obj, patch, opts...) + }) +} +func (tx *txImpl) Delete(obj core.Object, opts ...core.DeleteOption) Tx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.Delete(ctx, obj, opts...) + }) +} +func (tx *txImpl) DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) Tx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.DeleteAllOf(ctx, obj, opts...) + }) +} + +func (tx *txImpl) UpdateStatus(obj core.Object, opts ...core.UpdateOption) Tx { + return tx.Custom(func(ctx context.Context) error { + return nil // TODO tx.c.Status().Update(ctx, obj, opts...) 
+ }) +} +func (tx *txImpl) PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx { + return tx.Custom(func(ctx context.Context) error { + return nil // TODO tx.c.Status().Patch(ctx, obj, patch, opts...) + }) +} + +// TODO + +func (tx *txBranchImpl) Get(key core.ObjectKey, obj core.Object) BranchTx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.Get(ctx, key, obj) + }) +} +func (tx *txBranchImpl) List(list core.ObjectList, opts ...core.ListOption) BranchTx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.List(ctx, list, opts...) + }) +} + +func (tx *txBranchImpl) Create(obj core.Object, opts ...core.CreateOption) BranchTx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.Create(ctx, obj, opts...) + }) +} +func (tx *txBranchImpl) Update(obj core.Object, opts ...core.UpdateOption) BranchTx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.Update(ctx, obj, opts...) + }) +} +func (tx *txBranchImpl) Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.Patch(ctx, obj, patch, opts...) + }) +} +func (tx *txBranchImpl) Delete(obj core.Object, opts ...core.DeleteOption) BranchTx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.Delete(ctx, obj, opts...) + }) +} +func (tx *txBranchImpl) DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) BranchTx { + return tx.Custom(func(ctx context.Context) error { + return tx.c.DeleteAllOf(ctx, obj, opts...) + }) +} + +func (tx *txBranchImpl) UpdateStatus(obj core.Object, opts ...core.UpdateOption) BranchTx { + return tx.Custom(func(ctx context.Context) error { + return nil // TODO tx.c.Status().Update(ctx, obj, opts...) 
+ }) +} +func (tx *txBranchImpl) PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx { + return tx.Custom(func(ctx context.Context) error { + return nil // TODO tx.c.Status().Patch(ctx, obj, patch, opts...) + }) +} diff --git a/pkg/storage/client/transactional/utils.go b/pkg/storage/client/transactional/utils.go new file mode 100644 index 00000000..4812266f --- /dev/null +++ b/pkg/storage/client/transactional/utils.go @@ -0,0 +1,21 @@ +package transactional + +import "context" + +// execTransactionsCtx executes the functions in order. Before each +// function in the chain is run; the context is checked for errors +// (e.g. if it has been cancelled or timed out). If a context error +// is returned, or if a function in the chain returns an error, this +// function returns directly, without executing the rest of the +// functions in the chain. +func execTransactionsCtx(ctx context.Context, funcs []txFunc) error { + for _, fn := range funcs { + if err := ctx.Err(); err != nil { + return err + } + if err := fn(); err != nil { + return err + } + } + return nil +} From 11b0a6e7fa85143cb61f60062d4cb2028553fe9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 10:35:11 +0200 Subject: [PATCH 066/149] Check in the VersionRef file. --- pkg/storage/core/interfaces.go | 26 ++++++----- pkg/storage/core/versionref.go | 80 ++++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+), 12 deletions(-) create mode 100644 pkg/storage/core/versionref.go diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go index 208a6272..b25cec3a 100644 --- a/pkg/storage/core/interfaces.go +++ b/pkg/storage/core/interfaces.go @@ -67,18 +67,20 @@ type ObjectID interface { GroupVersionKind() GroupVersionKind } +// VersionRef is an interface that describes a reference to a specific version +// of Objects in a Storage or Client. 
type VersionRef interface { - IsZero() bool + // String returns the commit or branch name. String() string - Type() VersionRefType + // IsWritable determines if the VersionRef points to such a state where it + // is possible to write on top of it, i.e. as in the case of a Git branch. + // + // A specific Git commit, however, isn't considered writable, as it points + // to a specific point in time that can't just be rewritten, (assuming this + // library only is additive, which it is). + IsWritable() bool + // IsZeroValue determines if this VersionRef is the "zero value", which means + // that the caller should figure out how to handle that the user did not + // give specific opinions of what version of the Object to get. + IsZeroValue() bool } - -// VersionRefType specifies if the VersionRef is a commit (i.e. a read-only snapshot), or -// a writable branch. The terminology here is similar to that of Git, so people feel familiar -// with the concepts, but there is not requirement to use Git. -type VersionRefType int - -const ( - VersionRefTypeCommit VersionRefType = 1 + iota - VersionRefTypeBranch -) diff --git a/pkg/storage/core/versionref.go b/pkg/storage/core/versionref.go new file mode 100644 index 00000000..c9b3892b --- /dev/null +++ b/pkg/storage/core/versionref.go @@ -0,0 +1,80 @@ +package core + +import ( + "context" + "errors" +) + +var versionRefKey = versionRefKeyImpl{} + +type versionRefKeyImpl struct{} + +// WithVersionRef attaches the given VersionRef to a Context (it +// overwrites if one already exists in ctx). The key for the ref +// is private in this package, so one must use this function to +// register it. +func WithVersionRef(ctx context.Context, ref VersionRef) context.Context { + return context.WithValue(ctx, versionRefKey, ref) +} + +// GetVersionRef returns the VersionRef attached to this context. +// If there is no attached VersionRef, or it is nil, a BranchRef +// with branch "" will be returned as the "zero value" of VersionRef. 
+func GetVersionRef(ctx context.Context) VersionRef { + r, ok := ctx.Value(versionRefKey).(VersionRef) + // Return default ref if none specified + if r == nil || !ok { + return NewBranchRef("") + } + return r +} + +var ErrInvalidVersionRefType = errors.New("invalid version ref type") + +// NewBranchRef creates a new VersionRef for a given branch. It is +// valid for the branch to be ""; in this case it means the "zero +// value", or unspecified branch to be more precise, where the caller +// can choose how to handle. +func NewBranchRef(branch string) VersionRef { return branchRef{branch} } + +// NewCommitRef creates a new VersionRef for the given commit. The +// commit must uniquely define a certain revision precisely. It must +// not be an empty string. +func NewCommitRef(commit string) (VersionRef, error) { + if len(commit) == 0 { + return nil, errors.New("commit must not be an empty string") + } + return commitRef{commit}, nil +} + +// MustNewCommitRef runs NewCommitRef, but panics on errors +func MustNewCommitRef(commit string) VersionRef { + ref, err := NewCommitRef(commit) + if err != nil { + panic(err) + } + return ref +} + +type branchRef struct{ branch string } + +func (r branchRef) String() string { return r.branch } + +// A branch is considered writable, as commits can be added to it by libgitops +func (branchRef) IsWritable() bool { return true } + +// A branch is considered the zero value if the branch is an empty string, +// which it is e.g. when there was no VersionRef associated with a Context. +func (r branchRef) IsZeroValue() bool { return r.branch == "" } + +type commitRef struct{ commit string } + +func (r commitRef) String() string { return r.commit } + +// A commit is not considered writable, as it is only a read snapshot of +// a specific point in time. +func (commitRef) IsWritable() bool { return false } + +// IsZeroValue should always return false for commits; as commit is mandatory +// to be a non-empty string. 
+func (r commitRef) IsZeroValue() bool { return r.commit == "" } From d38a7c399306c44a5cd678f7ee08f0eb82123369 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 10:36:21 +0200 Subject: [PATCH 067/149] Simplify the Client.List operation significantly using utilerrs. --- pkg/storage/client/client.go | 102 +++++++++++++++-------------------- 1 file changed, 44 insertions(+), 58 deletions(-) diff --git a/pkg/storage/client/client.go b/pkg/storage/client/client.go index 0f48974e..6ca3e328 100644 --- a/pkg/storage/client/client.go +++ b/pkg/storage/client/client.go @@ -4,9 +4,7 @@ import ( "context" "errors" "fmt" - "sync" - "github.com/fluxcd/go-git-providers/validation" "github.com/weaveworks/libgitops/pkg/filter" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/backend" @@ -16,6 +14,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" kruntime "k8s.io/apimachinery/pkg/runtime" + utilerrs "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -131,33 +130,31 @@ func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...client // TODO: Is this a good default? Need to balance mem usage and speed. This is prob. 
too much ch := make(chan core.Object, len(allIDs)) - wg := &sync.WaitGroup{} - wg.Add(1) - var processErr error + + objs := make([]kruntime.Object, 0, len(allIDs)) go func() { - createFunc := createObject(gvk, c.Backend().Scheme()) - if serializer.IsPartialObjectList(list) { - createFunc = createPartialObject(gvk) - } else if serializer.IsUnstructuredList(list) { - createFunc = createUnstructuredObject(gvk) + for o := range ch { + objs = append(objs, o) } - processErr = c.processKeys(ctx, allIDs, &listOpts.FilterOptions, createFunc, ch) - wg.Done() }() - objs := make([]kruntime.Object, 0, len(allIDs)) - for o := range ch { - objs = append(objs, o) + createFunc := createObject(gvk, c.Backend().Scheme()) + if serializer.IsPartialObjectList(list) { + createFunc = createPartialObject(gvk) + } else if serializer.IsUnstructuredList(list) { + createFunc = createUnstructuredObject(gvk) } - // Wait for processErr to be set, and the above goroutine to finish - wg.Wait() - if processErr != nil { - return processErr + // Start one goroutine per ID, and get back an aggregate error + err = c.processKeys(ctx, allIDs, &listOpts.FilterOptions, createFunc, ch) + // Always unconditionally stop the channel after this, we know there won't + // be any more writes to it. This will terminate the for-range loop above. 
+ close(ch) + if err != nil { + return err } // Populate the List's Items field with the objects returned - meta.SetList(list, objs) - return nil + return meta.SetList(list, objs) } func (c *Generic) Create(ctx context.Context, obj core.Object, _ ...client.CreateOption) error { @@ -282,45 +279,34 @@ func createUnstructuredObject(gvk core.GroupVersionKind) newObjectFunc { } func (c *Generic) processKeys(ctx context.Context, ids []core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) error { - wg := &sync.WaitGroup{} - wg.Add(len(ids)) - multiErr := &validation.MultiError{} // TODO: Thread-safe append - for _, i := range ids { - go func(id core.UnversionedObjectID) { - defer wg.Done() - - // Create a new object, and decode into it using Get - obj, err := fn() - if err != nil { - multiErr.Errors = append(multiErr.Errors, err) - return - } - - if err := c.Get(ctx, id.ObjectKey(), obj); err != nil { - multiErr.Errors = append(multiErr.Errors, err) - return - } - - // Match the object against the filters - matched, err := filterOpts.Match(obj) - if err != nil { - multiErr.Errors = append(multiErr.Errors, err) - return - } - if !matched { - return - } + goroutines := []func() error{} + for _, id := range ids { + goroutines = append(goroutines, c.processKey(ctx, id, filterOpts, fn, output)) + } + return utilerrs.AggregateGoroutines(goroutines...) 
+} +func (c *Generic) processKey(ctx context.Context, id core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) func() error { + return func() error { + // Create a new object, and decode into it using Get + obj, err := fn() + if err != nil { + return err + } + + if err := c.Get(ctx, id.ObjectKey(), obj); err != nil { + return err + } + + // Match the object against the filters + matched, err := filterOpts.Match(obj) + if err != nil { + return err + } + if matched { output <- obj - }(i) - } - wg.Wait() - // Close the output channel so that the for-range loop stops - close(output) + } - // TODO: upstream this - if len(multiErr.Errors) != 0 { - return multiErr + return nil } - return nil } From 2a8792c100a52bb554d78d44f731b6d33b83ffa6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 10:44:20 +0200 Subject: [PATCH 068/149] Use the slightly updated versionref interface --- pkg/storage/client/transactional/client.go | 39 +++++++++++----------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index f0c716a9..4295bda8 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -67,20 +67,20 @@ func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...core.L func (c *Generic) lockForReading(ctx context.Context, operation func() error) error { ref := core.GetVersionRef(ctx) - switch ref.Type() { - case core.VersionRefTypeCommit: - // Never block reads for specific commits + if !ref.IsWritable() { + // Never block reads for read-only VersionRefs. We know nobody can change + // them during the read operation, so they should be race condition-free. 
return operation() - case core.VersionRefTypeBranch: - return c.readBranch(ref.String(), operation) - default: - return fmt.Errorf("%w: %s", core.ErrInvalidVersionRefType, ref.Type()) } + // If the VersionRef is writable; treat it as a branch and lock it to avoid + // race conditions. + return c.lockAndReadBranch(ref.String(), operation) } -func (c *Generic) readBranch(branch string, callback func() error) error { - // Aquire the tx-specific lock +func (c *Generic) lockAndReadBranch(branch string, callback func() error) error { + // Use c.txsMu to guard reads and writes to the c.txs map c.txsMu.Lock() + // Check if information about a transaction on this branch exists. txState, ok := c.txs[branch] if !ok { // grow the txs map by one @@ -90,8 +90,12 @@ func (c *Generic) readBranch(branch string, callback func() error) error { txState = c.txs[branch] } c.txsMu.Unlock() - // During this period, no transactions can be started, - // only reads can be active + + // In the atomic mode, we lock the txLock during the read, + // so no new transactions can be started while the read + // operation goes on. In non-atomic modes, reads aren't locked, + // instead it is assumed that downstream implementations just + // read the latest commit on the given branch. 
if txState.mode == TxModeAtomic { txState.mu.RLock() } @@ -218,16 +222,13 @@ func (c *Generic) BranchTransaction(ctx context.Context, headBranch string, opts } func (c *Generic) validateCtx(ctx context.Context) (core.VersionRef, error) { - // Check so versionref isn't set here + // Check so versionref is writable ref := core.GetVersionRef(ctx) - switch ref.Type() { - case core.VersionRefTypeCommit: - return nil, fmt.Errorf("must not give a VersionRef of type Commit to (Branch)Transaction()") - case core.VersionRefTypeBranch: - return ref, nil - default: - return nil, fmt.Errorf("%w: %s", core.ErrInvalidVersionRefType, ref.Type()) + if !ref.IsWritable() { + return nil, fmt.Errorf("must not give a writable VersionRef to (Branch)Transaction()") } + + return ref, nil } func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) { From 51844720dd5068927a87c9733d636e8eba632c78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 16:56:51 +0200 Subject: [PATCH 069/149] Rename the handlers to hooks, and add a chain interface too, which allows upstream consumers to hook into the operations. --- pkg/storage/client/transactional/client.go | 20 +++--- pkg/storage/client/transactional/handlers.go | 72 +++++++++++++------ .../client/transactional/interfaces.go | 6 +- pkg/storage/client/transactional/tx_common.go | 10 +-- 4 files changed, 64 insertions(+), 44 deletions(-) diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index 4295bda8..0c75cdce 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -180,17 +180,13 @@ func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txF } func (c *Generic) cleanupAfterTx(ctx context.Context, info *TxInfo) error { - // Always run both the "clean branch" command... 
- errs := []error{c.manager.ResetToCleanBranch(ctx, info.Base)} - if c.manager.TransactionHandler() != nil { - // ... and the post-transaction command + // Always both clean the branch, and run post-tx tasks + return utilerrs.NewAggregate([]error{ + c.manager.ResetToCleanBranch(ctx, info.Base), // TODO: should this be in its own goroutine to switch back to main // ASAP? - errs = append(errs, - c.manager.TransactionHandler().HandlePostTransaction(ctx, *info)) - } - // Return an aggregate error - return utilerrs.NewAggregate(errs) + c.manager.TransactionHookChain().PostTransactionHook(ctx, *info), + }) } func (c *Generic) BackendReader() backend.Reader { @@ -227,7 +223,7 @@ func (c *Generic) validateCtx(ctx context.Context) (core.VersionRef, error) { if !ref.IsWritable() { return nil, fmt.Errorf("must not give a writable VersionRef to (Branch)Transaction()") } - + // Just return its return ref, nil } @@ -251,7 +247,7 @@ func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) ctxWithDeadline, cleanupFunc := c.initTx(ctx, info) // Run pre-tx checks - err = c.manager.TransactionHandler().HandlePreTransaction(ctxWithDeadline, info) + err = c.manager.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info) return &txImpl{ &txCommon{ @@ -305,7 +301,7 @@ func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts // Run pre-tx checks and create the new branch err = utilerrs.NewAggregate([]error{ - c.manager.TransactionHandler().HandlePreTransaction(ctxWithDeadline, info), + c.manager.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info), c.manager.CreateBranch(ctxWithDeadline, headBranch), }) diff --git a/pkg/storage/client/transactional/handlers.go b/pkg/storage/client/transactional/handlers.go index 570a1739..aa438e3f 100644 --- a/pkg/storage/client/transactional/handlers.go +++ b/pkg/storage/client/transactional/handlers.go @@ -8,66 +8,94 @@ type TxInfo struct { Options TxOptions } -type CommitHandler 
interface { - HandlePreCommit(ctx context.Context, commit Commit, info TxInfo) error - HandlePostCommit(ctx context.Context, commit Commit, info TxInfo) error +type CommitHookChain interface { + // The chain also itself implements CommitHook + CommitHook + // Register registers a new CommitHook to the chain + Register(CommitHook) } -type MultiCommitHandler struct { - CommitHandlers []CommitHandler +type CommitHook interface { + PreCommitHook(ctx context.Context, commit Commit, info TxInfo) error + PostCommitHook(ctx context.Context, commit Commit, info TxInfo) error } -func (m *MultiCommitHandler) HandlePreCommit(ctx context.Context, commit Commit, info TxInfo) error { - for _, ch := range m.CommitHandlers { +var _ CommitHookChain = &MultiCommitHook{} +var _ CommitHook = &MultiCommitHook{} + +type MultiCommitHook struct { + CommitHooks []CommitHook +} + +func (m *MultiCommitHook) Register(h CommitHook) { + m.CommitHooks = append(m.CommitHooks, h) +} + +func (m *MultiCommitHook) PreCommitHook(ctx context.Context, commit Commit, info TxInfo) error { + for _, ch := range m.CommitHooks { if ch == nil { continue } - if err := ch.HandlePreCommit(ctx, commit, info); err != nil { + if err := ch.PreCommitHook(ctx, commit, info); err != nil { return err } } return nil } -func (m *MultiCommitHandler) HandlePostCommit(ctx context.Context, commit Commit, info TxInfo) error { - for _, ch := range m.CommitHandlers { +func (m *MultiCommitHook) PostCommitHook(ctx context.Context, commit Commit, info TxInfo) error { + for _, ch := range m.CommitHooks { if ch == nil { continue } - if err := ch.HandlePostCommit(ctx, commit, info); err != nil { + if err := ch.PostCommitHook(ctx, commit, info); err != nil { return err } } return nil } -type TransactionHandler interface { - HandlePreTransaction(ctx context.Context, info TxInfo) error - HandlePostTransaction(ctx context.Context, info TxInfo) error +type TransactionHookChain interface { + // The chain also itself implements TransactionHook 
+ TransactionHook + // Register registers a new CommitHook to the chain + Register(TransactionHook) +} + +type TransactionHook interface { + PreTransactionHook(ctx context.Context, info TxInfo) error + PostTransactionHook(ctx context.Context, info TxInfo) error +} + +var _ TransactionHookChain = &MultiTransactionHook{} +var _ TransactionHook = &MultiTransactionHook{} + +type MultiTransactionHook struct { + TransactionHooks []TransactionHook } -type MultiTransactionHandler struct { - TransactionHandlers []TransactionHandler +func (m *MultiTransactionHook) Register(h TransactionHook) { + m.TransactionHooks = append(m.TransactionHooks, h) } -func (m *MultiTransactionHandler) HandlePreTransaction(ctx context.Context, info TxInfo) error { - for _, th := range m.TransactionHandlers { +func (m *MultiTransactionHook) PreTransactionHook(ctx context.Context, info TxInfo) error { + for _, th := range m.TransactionHooks { if th == nil { continue } - if err := th.HandlePreTransaction(ctx, info); err != nil { + if err := th.PreTransactionHook(ctx, info); err != nil { return err } } return nil } -func (m *MultiTransactionHandler) HandlePostTransaction(ctx context.Context, info TxInfo) error { - for _, th := range m.TransactionHandlers { +func (m *MultiTransactionHook) PostTransactionHook(ctx context.Context, info TxInfo) error { + for _, th := range m.TransactionHooks { if th == nil { continue } - if err := th.HandlePostTransaction(ctx, info); err != nil { + if err := th.PostTransactionHook(ctx, info); err != nil { return err } } diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go index f99fdab3..7371f4c3 100644 --- a/pkg/storage/client/transactional/interfaces.go +++ b/pkg/storage/client/transactional/interfaces.go @@ -22,8 +22,10 @@ type BranchManager interface { ResetToCleanBranch(ctx context.Context, branch string) error Commit(ctx context.Context, commit Commit) error - CommitHandler() CommitHandler - 
TransactionHandler() TransactionHandler + // CommitHookChain must be non-nil, but can be a no-op + CommitHookChain() CommitHookChain + // TransactionHookChain must be non-nil, but can be a no-op + TransactionHookChain() TransactionHookChain } type BranchMerger interface { diff --git a/pkg/storage/client/transactional/tx_common.go b/pkg/storage/client/transactional/tx_common.go index e4111725..3448c811 100644 --- a/pkg/storage/client/transactional/tx_common.go +++ b/pkg/storage/client/transactional/tx_common.go @@ -33,10 +33,7 @@ func (tx *txCommon) Abort(err error) error { func (tx *txCommon) handlePreCommit(c Commit) txFunc { return func() error { - if tx.manager.CommitHandler() == nil { - return nil - } - return tx.manager.CommitHandler().HandlePreCommit(tx.ctx, c, tx.info) + return tx.manager.CommitHookChain().PreCommitHook(tx.ctx, c, tx.info) } } @@ -48,10 +45,7 @@ func (tx *txCommon) commit(c Commit) txFunc { func (tx *txCommon) handlePostCommit(c Commit) txFunc { return func() error { - if tx.manager.CommitHandler() == nil { - return nil - } - return tx.manager.CommitHandler().HandlePostCommit(tx.ctx, c, tx.info) + return tx.manager.CommitHookChain().PostCommitHook(tx.ctx, c, tx.info) } } From 9ea7647cea96a16eb04b33d42393f6719ca14f5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:00:42 +0200 Subject: [PATCH 070/149] Make the PathExcluder simpler; create a multi-one and a file extension excluder. 
--- pkg/storage/filesystem/dir_traversal.go | 2 +- pkg/storage/filesystem/path_excluder.go | 103 ++++++++++++++----- pkg/storage/filesystem/path_excluder_test.go | 22 ++-- 3 files changed, 92 insertions(+), 35 deletions(-) diff --git a/pkg/storage/filesystem/dir_traversal.go b/pkg/storage/filesystem/dir_traversal.go index 59668765..12284d71 100644 --- a/pkg/storage/filesystem/dir_traversal.go +++ b/pkg/storage/filesystem/dir_traversal.go @@ -27,7 +27,7 @@ func ListValidFilesInFilesystem(ctx context.Context, fs Filesystem, contentTyper // that contentTyper recognizes, and is not a path that is excluded by pathExcluder. func IsValidFileInFilesystem(ctx context.Context, fs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder, file string) bool { // return false if this path should be excluded - if pathExcluder.ShouldExcludePath(ctx, file) { + if pathExcluder.ShouldExcludePath(file) { return false } diff --git a/pkg/storage/filesystem/path_excluder.go b/pkg/storage/filesystem/path_excluder.go index cb358648..58e8d2a6 100644 --- a/pkg/storage/filesystem/path_excluder.go +++ b/pkg/storage/filesystem/path_excluder.go @@ -1,8 +1,11 @@ package filesystem import ( - "context" + "os" "path/filepath" + "strings" + + "k8s.io/apimachinery/pkg/util/sets" ) // PathExcluder is an interface that lets the user implement custom policies @@ -10,34 +13,80 @@ import ( // that directory) should be considered for an operation (e.g. inotify watch // or file search). type PathExcluder interface { - // ShouldExcludePath takes in a context, the fs filesystem abstraction, - // and a relative path to the file which should be determined if it should - // be excluded or not. - ShouldExcludePath(ctx context.Context, path string) bool -} - -// ExcludeGitDirectory implements PathExcluder. 
-var _ PathExcluder = ExcludeGitDirectory{} - -// ExcludeGitDirectory is a sample implementation of PathExcluder, that excludes -// all files under a ".git" directory, anywhere in the tree under the root directory. -type ExcludeGitDirectory struct{} - -func (ExcludeGitDirectory) ShouldExcludePath(_ context.Context, path string) bool { - // Always start from a clean path - path = filepath.Clean(path) - for { - // get the current base entry name - baseName := filepath.Base(path) - // This means path is now an empty string; we did not find a .git directory anywhere - if baseName == "." { - return false + // ShouldExcludePath takes in a relative path to the file which maybe + // should be excluded. + ShouldExcludePath(path string) bool +} + +// DefaultPathExcluders returns a composition of +// ExcludeDirectoryNames{} for ".git" dirs and ExcludeExtensions{} for the ".swp" file extensions. +func DefaultPathExcluders() PathExcluder { + return MultiPathExcluder{ + PathExcluders: []PathExcluder{ + ExcludeDirectoryNames{ + DirectoryNamesToExclude: []string{".git"}, + }, + ExcludeExtensions{ + Extensions: []string{".swp"}, // nano creates temporary .swp + }, + }, + } +} + +// ExcludeDirectoryNames implements PathExcluder. +var _ PathExcluder = ExcludeDirectoryNames{} + +// ExcludeDirectories is a sample implementation of PathExcluder, that excludes +// files that have any parent directories with the given names. +type ExcludeDirectoryNames struct { + DirectoryNamesToExclude []string +} + +func (e ExcludeDirectoryNames) ShouldExcludePath(path string) bool { + parts := strings.Split(filepath.Clean(path), string(os.PathSeparator)) + return sets.NewString(parts[:len(parts)-1]...).HasAny(e.DirectoryNamesToExclude...) +} + +// ExcludeExtensions implements PathExcluder. +var _ PathExcluder = ExcludeExtensions{} + +// ExcludeExtensions is a sample implementation of PathExcluder, that excludes +// all files with the given extensions. 
The strings in the Extensions slice +// must be in the form of filepath.Ext, i.e. ".json", ".txt", and so forth. +// The zero value of ExcludeExtensions excludes no files. +type ExcludeExtensions struct { + Extensions []string +} + +func (e ExcludeExtensions) ShouldExcludePath(path string) bool { + ext := filepath.Ext(path) + for _, exclExt := range e.Extensions { + if ext == exclExt { + return true + } + } + return false +} + +// MultiPathExcluder implements PathExcluder. +var _ PathExcluder = &MultiPathExcluder{} + +// MultiPathExcluder is a composite PathExcluder that runs all of the +// PathExcluders in the slice one-by-one, and returns true if any of them +// does. The zero value of MultiPathExcluder excludes no files. +type MultiPathExcluder struct { + PathExcluders []PathExcluder +} + +func (m MultiPathExcluder) ShouldExcludePath(path string) bool { + // Loop through all the excluders, and return true if any of them does + for _, excl := range m.PathExcluders { + if excl == nil { + continue } - // We possibly found a directory named git; this path should be excluded - if baseName == ".git" { + if excl.ShouldExcludePath(path) { return true } - // "go up" one directory for the next iteration - path = filepath.Dir(path) } + return false } diff --git a/pkg/storage/filesystem/path_excluder_test.go b/pkg/storage/filesystem/path_excluder_test.go index 46c7c039..5995fd27 100644 --- a/pkg/storage/filesystem/path_excluder_test.go +++ b/pkg/storage/filesystem/path_excluder_test.go @@ -1,7 +1,6 @@ package filesystem import ( - "context" "testing" ) @@ -13,17 +12,17 @@ func TestExcludeGitDirectory_ShouldExcludePath(t *testing.T) { }{ { name: "normal", - path: ".git", + path: ".git/foo", want: true, }, { name: "with relative path", - path: "./.git", + path: "./.git/bar/baz", want: true, }, { name: "with many parents", - path: "/foo/bar/.git", + path: "/foo/bar/.git/hello", want: true, }, { @@ -56,12 +55,21 @@ func TestExcludeGitDirectory_ShouldExcludePath(t *testing.T) { 
path: ".gitea", want: false, }, + { + name: "absolute path without git", + path: "/foo/bar/no/git/here", + want: false, + }, + { + name: "don't catch files named .git", + path: "/hello/.git", + want: false, + }, } - e := ExcludeGitDirectory{} - ctx := context.Background() + e := ExcludeDirectoryNames{DirectoryNamesToExclude: []string{".git"}} for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := e.ShouldExcludePath(ctx, tt.path); got != tt.want { + if got := e.ShouldExcludePath(tt.path); got != tt.want { t.Errorf("ExcludeGitDirectory.ShouldExcludePath() = %v, want %v", got, tt.want) } }) From a326065772d658115a231f4257d3cd9f5f7459e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:01:50 +0200 Subject: [PATCH 071/149] Update the monitor to take in an error. --- .../filesystem/unstructured/event/storage.go | 7 ++--- pkg/util/sync/monitor.go | 26 ++++++++++++------- 2 files changed, 21 insertions(+), 12 deletions(-) diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index ef64dec8..0d674b52 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -212,11 +212,12 @@ func (s *Generic) getPath(ctx context.Context, id core.UnversionedObjectID) (str func (s *Generic) Close() error { err := s.emitter.Close() - s.monitor.Wait() + // No need to check the error here + _ = s.monitor.Wait() return err } -func (s *Generic) monitorFunc() { +func (s *Generic) monitorFunc() error { logrus.Debug("WatchStorage: Monitoring thread started") defer logrus.Debug("WatchStorage: Monitoring thread stopped") @@ -227,7 +228,7 @@ func (s *Generic) monitorFunc() { ev, ok := <-s.inbound if !ok { logrus.Error("WatchStorage: Fatal: Got non-ok response from watcher.GetFileEventStream()") - return + return nil } logrus.Tracef("WatchStorage: Processing event: %s", ev.Type) diff --git 
a/pkg/util/sync/monitor.go b/pkg/util/sync/monitor.go index f09c55ca..111a294d 100644 --- a/pkg/util/sync/monitor.go +++ b/pkg/util/sync/monitor.go @@ -1,31 +1,39 @@ package sync -import "sync" +import ( + "errors" + "sync" +) // Monitor is a convenience wrapper around // starting a goroutine with a wait group, // which can be used to wait for the // goroutine to stop. type Monitor struct { - wg *sync.WaitGroup + wg *sync.WaitGroup + err error } -func RunMonitor(f func()) (m *Monitor) { - m = &Monitor{ +func RunMonitor(f func() error) *Monitor { + m := &Monitor{ wg: new(sync.WaitGroup), } m.wg.Add(1) go func() { - f() + m.err = f() m.wg.Done() }() - return + return m } -func (m *Monitor) Wait() { - if m != nil { - m.wg.Wait() +func (m *Monitor) Wait() error { + // TODO: Do we need this check? + if m == nil { + return errors.New("Monitor: invalid null pointer to m") } + // TODO: maybe this could be easier implemented using just a channel? + m.wg.Wait() + return m.err } From c45e37bf228113606631e946a03e76d91fcc6bca Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:02:29 +0200 Subject: [PATCH 072/149] Use the new pathexcluder and monitor in the filewatcher, and always exclude such files we don't want to watch. 
--- .../fileevents/inotify/filewatcher.go | 36 +++++++++++++------ .../filesystem/fileevents/inotify/options.go | 4 +-- 2 files changed, 27 insertions(+), 13 deletions(-) diff --git a/pkg/storage/filesystem/fileevents/inotify/filewatcher.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go index 4c0f8b13..58d85186 100644 --- a/pkg/storage/filesystem/fileevents/inotify/filewatcher.go +++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go @@ -112,24 +112,34 @@ func (w *FileWatcher) WatchForFileEvents(ctx context.Context, into fileevents.Fi return nil // all ok } -func (w *FileWatcher) monitorFunc() { +func (w *FileWatcher) monitorFunc() error { log.Debug("FileWatcher: Monitoring thread started") defer log.Debug("FileWatcher: Monitoring thread stopped") defer close(w.outbound) // Close the update stream after the FileWatcher has stopped - ctx := context.Background() - for { event, ok := <-w.inbound if !ok { - return + logrus.Debug("FileWatcher: Got non-ok channel recieve from w.inbound, exiting monitorFunc") + return nil } if ievent(event).Mask&unix.IN_ISDIR != 0 { continue // Skip directories } - if w.opts.PathExcluder.ShouldExcludePath(ctx, event.Path()) { + // Get the relative path between the root directory and the changed file + // Note: This is just used for the PathExcluder, absolute paths are used + // in the underlying file-change computation system, until in sendUpdate + // where they are converted into relative paths before sending to the listener. + relativePath, err := filepath.Rel(w.dir, event.Path()) + if err != nil { + logrus.Errorf("FileWatcher: Error occurred when computing relative path between: %s and %s: %v", w.dir, event.Path(), err) + continue + } + + // The PathExcluder only operates on relative paths. 
+ if w.opts.PathExcluder.ShouldExcludePath(relativePath) { continue // Skip ignored files } @@ -142,18 +152,20 @@ func (w *FileWatcher) monitorFunc() { eventList = append(eventList, event) // Register the event in the map, and dispatch all the events at once after the timeout + // Note that event.Path() is just the unique key for the map here, it is not actually + // used later when computing the changes of the filesystem. w.batcher.Store(event.Path(), eventList) log.Debugf("FileWatcher: Registered inotify events %v for path %q", eventList, event.Path()) } } -func (w *FileWatcher) dispatchFunc() { +func (w *FileWatcher) dispatchFunc() error { log.Debug("FileWatcher: Dispatch thread started") defer log.Debug("FileWatcher: Dispatch thread stopped") for { // Wait until we have a batch dispatched to us - ok := w.batcher.ProcessBatch(func(key, val interface{}) bool { + ok := w.batcher.ProcessBatch(func(_, val interface{}) bool { // Concatenate all known events, and dispatch them to be handled one by one for _, event := range w.concatenateEvents(val.(notifyEvents)) { w.sendUpdate(event) @@ -163,7 +175,8 @@ func (w *FileWatcher) dispatchFunc() { return true }) if !ok { - return // The BatchWriter channel is closed, stop processing + logrus.Debug("FileWatcher: Got non-ok channel recieve from w.batcher, exiting dispatchFunc") + return nil // The BatchWriter channel is closed, stop processing } log.Debug("FileWatcher: Dispatched events batch and reset the events cache") @@ -194,8 +207,9 @@ func (w *FileWatcher) Close() error { notify.Stop(w.inbound) w.batcher.Close() close(w.inbound) // Close the inbound event stream - w.monitor.Wait() - w.dispatcher.Wait() + // No need to check the error here, as we only return nil above + _ = w.monitor.Wait() + _ = w.dispatcher.Wait() return nil } @@ -258,7 +272,7 @@ func (w *FileWatcher) newMoveCache(event notify.EventInfo) *moveCache { } // moveCaches wait one second to be cancelled before firing - m.timer = time.AfterFunc(time.Second, 
m.incomplete) + m.timer = time.AfterFunc(w.opts.BatchTimeout, m.incomplete) return m } diff --git a/pkg/storage/filesystem/fileevents/inotify/options.go b/pkg/storage/filesystem/fileevents/inotify/options.go index d724e75c..2c48e5dc 100644 --- a/pkg/storage/filesystem/fileevents/inotify/options.go +++ b/pkg/storage/filesystem/fileevents/inotify/options.go @@ -26,7 +26,7 @@ type FileWatcherOptions struct { // Default: DefaultEventBufferSize EventBufferSize int32 // PathExcluder provides a way to exclude paths. - // Default: filesystem.ExcludeGitDirectory{} + // Default: filesystem.DefaultPathExcluders() PathExcluder filesystem.PathExcluder } @@ -54,6 +54,6 @@ func defaultOptions() *FileWatcherOptions { return &FileWatcherOptions{ BatchTimeout: 1 * time.Second, EventBufferSize: DefaultEventBufferSize, - PathExcluder: filesystem.ExcludeGitDirectory{}, + PathExcluder: filesystem.DefaultPathExcluders(), } } From 1d29942ba174fea8d5deec6f8e2cb48479133e41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:02:47 +0200 Subject: [PATCH 073/149] Add validation to the objectrecognizer --- pkg/storage/core/recognizer.go | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/pkg/storage/core/recognizer.go b/pkg/storage/core/recognizer.go index b837e54d..fac0fe12 100644 --- a/pkg/storage/core/recognizer.go +++ b/pkg/storage/core/recognizer.go @@ -3,6 +3,7 @@ package core import ( "context" "errors" + "fmt" "github.com/weaveworks/libgitops/pkg/serializer" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -21,6 +22,9 @@ var _ ObjectRecognizer = &SerializerObjectRecognizer{} type SerializerObjectRecognizer struct { // Serializer is a required field in order for ResolveObjectID to function. Serializer serializer.Serializer + // AllowUnrecognized controls whether this implementation allows recognizing + // GVK combinations not known to the underlying Scheme. 
Default: false + AllowUnrecognized bool } func (r *SerializerObjectRecognizer) ResolveObjectID(_ context.Context, _ string, content []byte) (ObjectID, error) { @@ -35,5 +39,20 @@ func (r *SerializerObjectRecognizer) ResolveObjectID(_ context.Context, _ string if err != nil { return nil, err } + // Validate the object info + gvk := metaObj.GroupVersionKind() + if gvk.Group == "" && gvk.Version == "" { + return nil, fmt.Errorf(".apiVersion field must not be empty") + } + if gvk.Kind == "" { + return nil, fmt.Errorf(".kind field must not be empty") + } + if metaObj.Kind == "" { + return nil, fmt.Errorf(".metadata.name field must not be empty") + } + if !r.AllowUnrecognized && !r.Serializer.Scheme().Recognizes(gvk) { + return nil, fmt.Errorf("GroupVersionKind %v not recognized by the scheme", gvk) + } + return NewObjectID(metaObj.GroupVersionKind(), ObjectKeyFromObject(metaObj)), nil } From a26e18b211ff6666bf6b69be8fda585c10a81092 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:03:19 +0200 Subject: [PATCH 074/149] Fix race condition in Client.List --- pkg/storage/client/client.go | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) diff --git a/pkg/storage/client/client.go b/pkg/storage/client/client.go index 6ca3e328..9c216a07 100644 --- a/pkg/storage/client/client.go +++ b/pkg/storage/client/client.go @@ -10,6 +10,7 @@ import ( "github.com/weaveworks/libgitops/pkg/storage/backend" "github.com/weaveworks/libgitops/pkg/storage/core" patchutil "github.com/weaveworks/libgitops/pkg/util/patch" + syncutil "github.com/weaveworks/libgitops/pkg/util/sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -128,28 +129,27 @@ func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...client allIDs = append(allIDs, ids...) } - // TODO: Is this a good default? Need to balance mem usage and speed. 
This is prob. too much - ch := make(chan core.Object, len(allIDs)) - + // Populate objs through the given (non-buffered) channel + ch := make(chan core.Object) objs := make([]kruntime.Object, 0, len(allIDs)) - go func() { - for o := range ch { - objs = append(objs, o) - } - }() + // How should the object be created? createFunc := createObject(gvk, c.Backend().Scheme()) if serializer.IsPartialObjectList(list) { createFunc = createPartialObject(gvk) } else if serializer.IsUnstructuredList(list) { createFunc = createUnstructuredObject(gvk) } - // Start one goroutine per ID, and get back an aggregate error - err = c.processKeys(ctx, allIDs, &listOpts.FilterOptions, createFunc, ch) - // Always unconditionally stop the channel after this, we know there won't - // be any more writes to it. This will terminate the for-range loop above. - close(ch) - if err != nil { + // Temporary processing goroutine; execution starts instantly + m := syncutil.RunMonitor(func() error { + return c.processKeys(ctx, allIDs, &listOpts.FilterOptions, createFunc, ch) + }) + + for o := range ch { + objs = append(objs, o) + } + + if err := m.Wait(); err != nil { return err } @@ -283,6 +283,9 @@ func (c *Generic) processKeys(ctx context.Context, ids []core.UnversionedObjectI for _, id := range ids { goroutines = append(goroutines, c.processKey(ctx, id, filterOpts, fn, output)) } + + defer close(output) + return utilerrs.AggregateGoroutines(goroutines...) } From 47dec8e1a2df593ccfd7c955b47720254b470f6a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:04:12 +0200 Subject: [PATCH 075/149] Check in the distributed interfaces and options. 
--- .../transactional/distributed/interfaces.go | 75 ++++++++++++++ .../transactional/distributed/options.go | 97 +++++++++++++++++++ 2 files changed, 172 insertions(+) create mode 100644 pkg/storage/client/transactional/distributed/interfaces.go create mode 100644 pkg/storage/client/transactional/distributed/options.go diff --git a/pkg/storage/client/transactional/distributed/interfaces.go b/pkg/storage/client/transactional/distributed/interfaces.go new file mode 100644 index 00000000..81105990 --- /dev/null +++ b/pkg/storage/client/transactional/distributed/interfaces.go @@ -0,0 +1,75 @@ +package distributed + +import ( + "context" + "time" + + "github.com/weaveworks/libgitops/pkg/storage/client/transactional" +) + +// Client is a client that can sync state with a remote in a transactional way. +type Client interface { + // The distributed Client extends the transactional Client + transactional.Client + // This Client is itself both a CommitHook and TransactionHook; these should + // be automatically registered with the transactional.Client's BranchManager + // in this Client's constructor. + transactional.CommitHook + transactional.TransactionHook + + // StartResyncLoop starts a resync loop for the given branches for + // the given interval. + // + // resyncCacheInterval specifies the interval for which resyncs + // (remote Pulls) should be run in the background. The duration must + // be positive, and non-zero. + // + // resyncBranches specifies what branches to resync. The default is + // []string{""}, i.e. only the "default" branch. + // + // ctx should be used to cancel the loop, if needed. + // + // While it is technically possible to start many of these resync + // loops, it is not recommended. Start it once, for all the branches + // you need. The branches will be pulled synchronously in order. The + // resync interval is non-sliding, which means that the interval + // includes the time of the operations. 
+ StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches ...string) + + // Remote exposes the underlying remote used + Remote() Remote +} + +type Remote interface { + // Push pushes the attached branch (of the ctx) to the remote. + // Push must block as long as the operation is in progress, but also + // respect the timeout set on ctx and return instantly after it expires. + // + // It is guaranteed that Pull() and Push() are never called racily at + // the same time for the same branch, BUT Pull() and Push() might be called + // at the same time in any order for distinct branches. If the underlying + // Remote transport only supports one "writer transport" to it at the same time, + // the Remote must coordinate pulls and pushes with a mutex internally. + Push(ctx context.Context) error + + // Pull pulls the attached branch (of the ctx) from the remote. + // Pull must block as long as the operation is in progress, but also + // respect the timeout set on ctx and return instantly after it expires. + // + // It is guaranteed that Pull() and Push() are never called racily at + // the same time for the same branch, BUT Pull() and Push() might be called + // at the same time in any order for distinct branches. If the underlying + // Remote transport only supports one "writer transport" to it at the same time, + // the Remote must coordinate pulls and pushes with a mutex internally. + Pull(ctx context.Context) error +} + +// LockableRemote describes a remote that supports locking a remote branch for writing. +type LockableRemote interface { + Remote + + // Lock locks the branch attached to the context for writing, for the given duration. 
+ Lock(ctx context.Context, d time.Duration) error + // Unlock reverses the write lock created by Lock() + Unlock(ctx context.Context) error +} diff --git a/pkg/storage/client/transactional/distributed/options.go b/pkg/storage/client/transactional/distributed/options.go new file mode 100644 index 00000000..4640ce9a --- /dev/null +++ b/pkg/storage/client/transactional/distributed/options.go @@ -0,0 +1,97 @@ +package distributed + +import "time" + +// ClientOption is an interface for applying options to ClientOptions. +type ClientOption interface { + ApplyToClient(*ClientOptions) +} + +// ClientOptions specify options on how the distributed client should +// act according to the PACELC theorem. +// +// The following configurations correspond to the PACELC levels: +// +// PC/EC: CacheValidDuration == 0 && RemoteErrorStream == nil: +// This makes every read first do a remote Pull(), and fails +// critically if the Pull operation fails. Transactions fail +// if Push() fails. +// +// PC/EL: CacheValidDuration > 0 && RemoteErrorStream == nil: +// This makes a read do a remote Pull only if the delta between +// the last Pull and time.Now() exceeds CacheValidDuration. +// StartResyncLoop(resyncCacheInterval) can be used to +// periodically Pull in the background, so that the latency +// of reads are minimal. Transactions and reads fail if +// Push() or Pull() fail. +// +// PA/EL: RemoteErrorStream != nil: +// How often reads invoke Pull() is given by CacheValidDuration +// and StartResyncLoop(resyncCacheInterval) as per above. +// However, when a Pull() or Push() is invoked from a read or +// transaction, and a network partition happens, such errors are +// non-critical for the operation to succeed, as Availability is +// favored and cached objects are returned. +type ClientOptions struct { + // CacheValidDuration is the period of time the cache is still + // valid since its last resync (remote Pull). 
If set to 0; all + // reads will invoke a resync right before reading; as the cache + // is never valid. This option set to 0 favors Consistency over + // Availability. + // + // CacheValidDuration == 0 and RemoteErrorStream != nil must not + // be set at the same time; as they contradict. + // + // Default: 1m + CacheValidDuration time.Duration + // RemoteErrorStream specifies a stream in which to readirect + // errors from the remote, instead of returning them to the caller. + // This is useful for allowing "offline operation", and favoring + // Availability over Consistency when a Partition happens (i.e. + // the network is unreachable). In normal operation, remote Push/Pull + // errors would propagate to the caller and "fail" the Transaction, + // however, if that is not desired, those errors can be propagated + // here, and the caller will succeed with the transaction. + // Default: nil (optional) + RemoteErrorStream chan error + + // Default: 30s for all + LockTimeout time.Duration + PullTimeout time.Duration + PushTimeout time.Duration +} + +func (o *ClientOptions) ApplyToClient(target *ClientOptions) { + if o.CacheValidDuration != 0 { + target.CacheValidDuration = o.CacheValidDuration + } + if o.RemoteErrorStream != nil { + target.RemoteErrorStream = o.RemoteErrorStream + } + if o.LockTimeout != 0 { + target.LockTimeout = o.LockTimeout + } + if o.PullTimeout != 0 { + target.PullTimeout = o.PullTimeout + } + if o.PushTimeout != 0 { + target.PushTimeout = o.PushTimeout + } +} + +func (o *ClientOptions) ApplyOptions(opts []ClientOption) *ClientOptions { + for _, opt := range opts { + opt.ApplyToClient(o) + } + return o +} + +func defaultOptions() *ClientOptions { + return &ClientOptions{ + CacheValidDuration: 1 * time.Minute, + RemoteErrorStream: nil, + LockTimeout: 30 * time.Second, + PullTimeout: 30 * time.Second, + PushTimeout: 30 * time.Second, + } +} From c2583b68c4516acea9343e2f57fa6f6c551287c7 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:04:23 +0200 Subject: [PATCH 076/149] Check in the distributed client. --- .../transactional/distributed/client.go | 313 ++++++++++++++++++ 1 file changed, 313 insertions(+) create mode 100644 pkg/storage/client/transactional/distributed/client.go diff --git a/pkg/storage/client/transactional/distributed/client.go b/pkg/storage/client/transactional/distributed/client.go new file mode 100644 index 00000000..665c6fdb --- /dev/null +++ b/pkg/storage/client/transactional/distributed/client.go @@ -0,0 +1,313 @@ +package distributed + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional" + "github.com/weaveworks/libgitops/pkg/storage/core" + "k8s.io/apimachinery/pkg/util/wait" +) + +// NewClient creates a new distributed Client using the given underlying transactional Client, +// remote, and options that configure how the Client should respond to network partitions. 
+func NewClient(c transactional.Client, remote Remote, opts ...ClientOption) (*Generic, error) { + if c == nil { + return nil, fmt.Errorf("%w: c is mandatory", core.ErrInvalidParameter) + } + if remote == nil { + return nil, fmt.Errorf("%w: remote is mandatory", core.ErrInvalidParameter) + } + + o := defaultOptions().ApplyOptions(opts) + + g := &Generic{ + Client: c, + remote: remote, + opts: *o, + branchLocks: make(map[string]*branchLock), + branchLocksMu: &sync.Mutex{}, + } + + // Register ourselves to hook into the branch manager's operations + c.BranchManager().CommitHookChain().Register(g) + c.BranchManager().TransactionHookChain().Register(g) + + return g, nil +} + +type Generic struct { + transactional.Client + remote Remote + opts ClientOptions + // branchLocks maps a given branch to a given lock the state of the branch + branchLocks map[string]*branchLock + // branchLocksMu guards branchLocks + branchLocksMu *sync.Mutex +} + +type branchLock struct { + // mu should be write-locked whenever the branch is actively running any + // function from the remote + mu *sync.RWMutex + // lastPull is guarded by mu, before reading, one should RLock mu + lastPull time.Time +} + +func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { + return c.readWhenPossible(ctx, func() error { + return c.Client.Get(ctx, key, obj) + }) +} + +func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...core.ListOption) error { + return c.readWhenPossible(ctx, func() error { + return c.Client.List(ctx, list, opts...) + }) +} + +func (c *Generic) readWhenPossible(ctx context.Context, operation func() error) error { + ref := core.GetVersionRef(ctx) + // If the ref is not writable, we don't have to worry about race conditions + if !ref.IsWritable() { + return operation() + } + branch := ref.String() + + // Check if we need to do a pull before + if c.needsResync(branch, c.opts.CacheValidDuration) { + // Try to pull the remote branch. 
If it fails, use returnErr to figure out if + // this (depending on the configured PACELC mode) is a critical error, or if we + // should continue with the read + if err := c.pull(ctx, branch); err != nil { + if criticalErr := c.returnErr(err); criticalErr != nil { + return criticalErr + } + } + } + // Do the read operation + return operation() +} + +func (c *Generic) getBranchLockInfo(branch string) *branchLock { + c.branchLocksMu.Lock() + defer c.branchLocksMu.Unlock() + + // Check if there exists a lock for that branch + info, ok := c.branchLocks[branch] + if ok { + return info + } + // Write to the branchLocks map + c.branchLocks[branch] = &branchLock{ + mu: &sync.RWMutex{}, + } + return c.branchLocks[branch] +} + +func (c *Generic) needsResync(branch string, d time.Duration) bool { + lck := c.getBranchLockInfo(branch) + // Lock while reading the last resync time + lck.mu.RLock() + defer lck.mu.RUnlock() + // Resync if there has been no sync so far, or if the last resync was too long ago + return lck.lastPull.IsZero() || time.Since(lck.lastPull) > d +} + +// StartResyncLoop starts a resync loop for the given branches for +// the given interval. +// +// resyncCacheInterval specifies the interval for which resyncs +// (remote Pulls) should be run in the background. The duration must +// be positive, and non-zero. +// +// resyncBranches specifies what branches to resync. The default is +// []string{""}, i.e. only the "default" branch. +// +// ctx should be used to cancel the loop, if needed. +// +// While it is technically possible to start many of these resync +// loops, it is not recommended. Start it once, for all the branches +// you need. The branches will be pulled synchronously in order. The +// resync interval is non-sliding, which means that the interval +// includes the time of the operations. 
+func (c *Generic) StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches ...string) { + // Only start this loop if resyncCacheInterval > 0 + if resyncCacheInterval <= 0 { + logrus.Warn("No need to start the resync loop; resyncCacheInterval <= 0") + return + } + // If unset, only sync the default branch. + if resyncBranches == nil { + resyncBranches = []string{""} + } + + // Start the resync goroutine + go c.resyncLoop(ctx, resyncCacheInterval, resyncBranches) +} + +func (c *Generic) resyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches []string) { + logrus.Debug("Starting the resync loop...") + + wait.NonSlidingUntilWithContext(ctx, func(_ context.Context) { + + for _, branch := range resyncBranches { + logrus.Tracef("resyncLoop: Will perform pull operation on branch: %q", branch) + // Perform a fetch, pull & checkout of the new revision + if err := c.pull(ctx, branch); err != nil { + logrus.Errorf("resyncLoop: pull failed with error: %v", err) + return + } + } + }, resyncCacheInterval) + logrus.Info("Exiting the resync loop...") +} + +func (c *Generic) pull(ctx context.Context, branch string) error { + // Need to get the branch-specific lock variable + lck := c.getBranchLockInfo(branch) + // Write-lock while this operation is in progress + lck.mu.Lock() + defer lck.mu.Unlock() + + // Create a new context that times out after the given duration + pullCtx, cancel := context.WithTimeout(ctx, c.opts.PullTimeout) + defer cancel() + + // Make a ctx for the given branch + ctxForBranch := core.WithVersionRef(pullCtx, core.NewBranchRef(branch)) + if err := c.remote.Pull(ctxForBranch); err != nil { + return err + } + + // Register the timestamp into the lock + lck.lastPull = time.Now() + + // All good + return nil +} + +func (c *Generic) PreTransactionHook(ctx context.Context, info transactional.TxInfo) error { + // We count on ctx having the VersionRef registered for the head branch + + // Lock the branch for 
writing, if supported by the remote + // If the lock fails, we DO NOT try to pull, but just exit (either with err or a nil error, + // depending on the configured PACELC mode) + // TODO: Can we rely on the timeout being exact enough here? + // TODO: How to do this before the branch even exists...? + if err := c.lock(ctx, info.Options.Timeout); err != nil { + return c.returnErr(err) + } + + // Always Pull the _base_ branch before a transaction, to be up-to-date + // before creating the new head branch + if err := c.pull(ctx, info.Base); err != nil { + return c.returnErr(err) + } + + // All good + return nil +} + +func (c *Generic) PreCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error { + return nil // nothing to do here +} + +func (c *Generic) PostCommitHook(ctx context.Context, _ transactional.Commit, _ transactional.TxInfo) error { + // Push the branch in the ctx + if err := c.push(ctx); err != nil { + return c.returnErr(err) + } + return nil +} + +func (c *Generic) PostTransactionHook(ctx context.Context, info transactional.TxInfo) error { + // Unlock the head branch, if supported + if err := c.unlock(ctx); err != nil { + return c.returnErr(err) + } + + return nil +} + +func (c *Generic) Remote() Remote { + return c.remote +} + +// note: this must ONLY be called from such functions where it is guaranteed that the +// ctx contains a branch versionref. 
+func (c *Generic) branchFromCtx(ctx context.Context) string { + return core.GetVersionRef(ctx).String() +} + +func (c *Generic) returnErr(err error) error { + // If RemoteErrorStream isn't defined, just pass the error through + if c.opts.RemoteErrorStream == nil { + return err + } + // Non-blocking send to the channel, and no return error + go func() { + c.opts.RemoteErrorStream <- err + }() + return nil +} + +func (c *Generic) lock(ctx context.Context, d time.Duration) error { + lr, ok := c.remote.(LockableRemote) + if !ok { + return nil + } + + // Need to get the branch-specific lock variable + lck := c.getBranchLockInfo(c.branchFromCtx(ctx)) + // Write-lock while this operation is in progress + lck.mu.Lock() + defer lck.mu.Unlock() + + // Enforce a timeout + lockCtx, cancel := context.WithTimeout(ctx, c.opts.LockTimeout) + defer cancel() + + return lr.Lock(lockCtx, d) +} + +func (c *Generic) unlock(ctx context.Context) error { + lr, ok := c.remote.(LockableRemote) + if !ok { + return nil + } + + // Need to get the branch-specific lock variable + lck := c.getBranchLockInfo(c.branchFromCtx(ctx)) + // Write-lock while this operation is in progress + lck.mu.Lock() + defer lck.mu.Unlock() + + // Enforce a timeout + unlockCtx, cancel := context.WithTimeout(ctx, c.opts.LockTimeout) + defer cancel() + + return lr.Unlock(unlockCtx) +} + +func (c *Generic) push(ctx context.Context) error { + // Need to get the branch-specific lock variable + lck := c.getBranchLockInfo(c.branchFromCtx(ctx)) + // Write-lock while this operation is in progress + lck.mu.Lock() + defer lck.mu.Unlock() + + // Create a new context that times out after the given duration + pushCtx, cancel := context.WithTimeout(ctx, c.opts.PushTimeout) + defer cancel() + + // Push the head branch using the remote + // If the Push fails, don't execute any other later statements + if err := c.remote.Push(pushCtx); err != nil { + return err + } + return nil +} From 414e62f85c98b96aac01bf944bd59fff8e88fad4 Mon Sep 
17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:33:26 +0200 Subject: [PATCH 077/149] Move place for the GitDirectory; now it is a branchmanager and remote. --- pkg/gitdir/gitdir.go | 474 ------------------ .../transactional/distributed/git/git.go | 368 ++++++++++++++ .../distributed/git}/transport.go | 2 +- 3 files changed, 369 insertions(+), 475 deletions(-) delete mode 100644 pkg/gitdir/gitdir.go create mode 100644 pkg/storage/client/transactional/distributed/git/git.go rename pkg/{gitdir => storage/client/transactional/distributed/git}/transport.go (99%) diff --git a/pkg/gitdir/gitdir.go b/pkg/gitdir/gitdir.go deleted file mode 100644 index a9eb0b70..00000000 --- a/pkg/gitdir/gitdir.go +++ /dev/null @@ -1,474 +0,0 @@ -package gitdir - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "os" - "sync" - "time" - - "github.com/fluxcd/go-git-providers/gitprovider" - git "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - log "github.com/sirupsen/logrus" - "k8s.io/apimachinery/pkg/util/wait" -) - -var ( - // ErrNotStarted happens if you try to operate on the gitDirectory before you have started - // it with StartCheckoutLoop. - ErrNotStarted = errors.New("the gitDirectory hasn't been started (and hence, cloned) yet") - // ErrCannotWriteToReadOnly happens if you try to do a write operation for a non-authenticated Git repo. - ErrCannotWriteToReadOnly = errors.New("the gitDirectory is read-only, cannot write") -) - -const ( - defaultBranch = "master" - defaultRemote = "origin" - defaultInterval = 30 * time.Second - defaultTimeout = 1 * time.Minute -) - -// GitDirectoryOptions provides options for the gitDirectory. -// TODO: Refactor this into the controller-runtime Options factory pattern. 
-type GitDirectoryOptions struct { - // Options - Branch string // default "master" - Interval time.Duration // default 30s - Timeout time.Duration // default 1m - // TODO: Support folder prefixes - - // Authentication - AuthMethod AuthMethod -} - -func (o *GitDirectoryOptions) Default() { - if o.Branch == "" { - o.Branch = defaultBranch - } - if o.Interval == 0 { - o.Interval = defaultInterval - } - if o.Timeout == 0 { - o.Timeout = defaultTimeout - } -} - -// GitDirectory is an abstraction layer for a temporary Git clone. It pulls -// and checks out new changes periodically in the background. It also allows -// high-level access to write operations, like creating a new branch, committing, -// and pushing. -type GitDirectory interface { - // Dir returns the backing temporary directory of the git clone. - Dir() string - // MainBranch returns the configured main branch. - MainBranch() string - // RepositoryRef returns the repository reference. - RepositoryRef() gitprovider.RepositoryRef - - // StartCheckoutLoop clones the repo synchronously, and then starts the checkout loop non-blocking. - // If the checkout loop has been started already, this is a no-op. - StartCheckoutLoop() error - // Suspend waits for any pending transactions or operations, and then locks the internal mutex so that - // no other operations can start. This means the periodic background checkout loop will momentarily stop. - Suspend() - // Resume unlocks the mutex locked in Suspend(), so that other Git operations, like the background checkout - // loop can resume its operation. - Resume() - - // Pull performs a pull & checkout to the latest revision. - // ErrNotStarted is returned if the repo hasn't been cloned yet. - Pull(ctx context.Context) error - - // CheckoutNewBranch creates a new branch and checks out to it. - // ErrNotStarted is returned if the repo hasn't been cloned yet. - CheckoutNewBranch(branchName string) error - // CheckoutMainBranch goes back to the main branch. 
- // ErrNotStarted is returned if the repo hasn't been cloned yet. - CheckoutMainBranch() error - - // Commit creates a commit of all changes in the current worktree with the given parameters. - // It also automatically pushes the branch after the commit. - // ErrNotStarted is returned if the repo hasn't been cloned yet. - // ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided. - Commit(ctx context.Context, authorName, authorEmail, msg string) error - // CommitChannel is a channel to where new observed Git SHAs are written. - CommitChannel() chan string - - // Cleanup terminates any pending operations, and removes the temporary directory. - Cleanup() error -} - -// Create a new GitDirectory implementation. In order to start using this, run StartCheckoutLoop(). -func NewGitDirectory(repoRef gitprovider.RepositoryRef, opts GitDirectoryOptions) (GitDirectory, error) { - log.Info("Initializing the Git repo...") - - // Default the options - opts.Default() - - // Create a temporary directory for the clone - tmpDir, err := ioutil.TempDir("", "libgitops") - if err != nil { - return nil, err - } - log.Debugf("Created temporary directory for the git clone at %q", tmpDir) - - d := &gitDirectory{ - repoRef: repoRef, - GitDirectoryOptions: opts, - cloneDir: tmpDir, - // TODO: This needs to be large, otherwise it can start blocking unnecessarily if nobody reads it - commitChan: make(chan string, 1024), - lock: &sync.Mutex{}, - } - // Set up the parent context for this class. 
d.cancel() is called only at Cleanup() - d.ctx, d.cancel = context.WithCancel(context.Background()) - - log.Trace("URL endpoint parsed and authentication method chosen") - - if d.canWrite() { - log.Infof("Running in read-write mode, will commit back current status to the repo") - } else { - log.Infof("Running in read-only mode, won't write status back to the repo") - } - - return d, nil -} - -// gitDirectory is an implementation which keeps a directory -type gitDirectory struct { - // user-specified options - repoRef gitprovider.RepositoryRef - GitDirectoryOptions - - // the temporary directory used for the clone - cloneDir string - - // go-git objects. wt is the worktree of the repo, persistent during the lifetime of repo. - repo *git.Repository - wt *git.Worktree - - // latest known commit to the system - lastCommit string - // events channel from new commits - commitChan chan string - - // the context and its cancel function for the lifetime of this struct (until Cleanup()) - ctx context.Context - cancel context.CancelFunc - // the lock for git operations (so pushing and pulling aren't done simultaneously) - lock *sync.Mutex -} - -func (d *gitDirectory) Dir() string { - return d.cloneDir -} - -func (d *gitDirectory) MainBranch() string { - return d.Branch -} - -func (d *gitDirectory) RepositoryRef() gitprovider.RepositoryRef { - return d.repoRef -} - -// StartCheckoutLoop clones the repo synchronously, and then starts the checkout loop non-blocking. -// If the checkout loop has been started already, this is a no-op. 
-func (d *gitDirectory) StartCheckoutLoop() error { - if d.wt != nil { - return nil // already initialized - } - // First, clone the repo - if err := d.clone(); err != nil { - return err - } - go d.checkoutLoop() - return nil -} - -func (d *gitDirectory) Suspend() { - d.lock.Lock() -} - -func (d *gitDirectory) Resume() { - d.lock.Unlock() -} - -func (d *gitDirectory) CommitChannel() chan string { - return d.commitChan -} - -func (d *gitDirectory) checkoutLoop() { - log.Info("Starting the checkout loop...") - - wait.NonSlidingUntilWithContext(d.ctx, func(_ context.Context) { - - log.Trace("checkoutLoop: Will perform pull operation") - // Perform a pull & checkout of the new revision - if err := d.Pull(d.ctx); err != nil { - log.Errorf("checkoutLoop: git pull failed with error: %v", err) - return - } - - }, d.Interval) - log.Info("Exiting the checkout loop...") -} - -func (d *gitDirectory) cloneURL() string { - return d.repoRef.GetCloneURL(d.AuthMethod.TransportType()) -} - -func (d *gitDirectory) canWrite() bool { - return d.AuthMethod != nil -} - -// verifyRead makes sure it's ok to start a read-something-from-git process -func (d *gitDirectory) verifyRead() error { - // Safeguard against not starting yet - if d.wt == nil { - return fmt.Errorf("cannot pull: %w", ErrNotStarted) - } - return nil -} - -// verifyWrite makes sure it's ok to start a write-something-to-git process -func (d *gitDirectory) verifyWrite() error { - // We need all read privileges first - if err := d.verifyRead(); err != nil { - return err - } - // Make sure we don't write to a possibly read-only repo - if !d.canWrite() { - return ErrCannotWriteToReadOnly - } - return nil -} - -func (d *gitDirectory) clone() error { - // Lock the mutex now that we're starting, and unlock it when exiting - d.lock.Lock() - defer d.lock.Unlock() - - log.Infof("Starting to clone the repository %s with timeout %s", d.repoRef, d.Timeout) - // Do a clone operation to the temporary directory, with a timeout - err := 
d.contextWithTimeout(d.ctx, func(ctx context.Context) error { - var err error - d.repo, err = git.PlainCloneContext(ctx, d.Dir(), false, &git.CloneOptions{ - URL: d.cloneURL(), - Auth: d.AuthMethod, - RemoteName: defaultRemote, - ReferenceName: plumbing.NewBranchReferenceName(d.Branch), - SingleBranch: true, - NoCheckout: false, - //Depth: 1, // ref: https://github.com/src-d/go-git/issues/1143 - RecurseSubmodules: 0, - Progress: nil, - Tags: git.NoTags, - }) - return err - }) - // Handle errors - switch err { - case nil: - // no-op, just continue. - case context.DeadlineExceeded: - return fmt.Errorf("git clone operation took longer than deadline %s", d.Timeout) - case context.Canceled: - log.Tracef("context was cancelled") - return nil // if Cleanup() was called, just exit the goroutine - default: - return fmt.Errorf("git clone error: %v", err) - } - - // Populate the worktree pointer - d.wt, err = d.repo.Worktree() - if err != nil { - return fmt.Errorf("git get worktree error: %v", err) - } - - // Get the latest HEAD commit and report it to the user - ref, err := d.repo.Head() - if err != nil { - return err - } - - d.observeCommit(ref.Hash()) - return nil -} - -func (d *gitDirectory) Pull(ctx context.Context) error { - // Lock the mutex now that we're starting, and unlock it when exiting - d.lock.Lock() - defer d.lock.Unlock() - - // Make sure it's okay to read - if err := d.verifyRead(); err != nil { - return err - } - - // Perform the git pull operation using the timeout - err := d.contextWithTimeout(ctx, func(innerCtx context.Context) error { - log.Trace("checkoutLoop: Starting pull operation") - return d.wt.PullContext(innerCtx, &git.PullOptions{ - Auth: d.AuthMethod, - SingleBranch: true, - }) - }) - // Handle errors - switch err { - case nil, git.NoErrAlreadyUpToDate: - // no-op, just continue. 
Allow the git.NoErrAlreadyUpToDate error - case context.DeadlineExceeded: - return fmt.Errorf("git pull operation took longer than deadline %s", d.Timeout) - case context.Canceled: - log.Tracef("context was cancelled") - return nil // if Cleanup() was called, just exit the goroutine - default: - return fmt.Errorf("failed to pull: %v", err) - } - - log.Trace("checkoutLoop: Pulled successfully") - - // get current head - ref, err := d.repo.Head() - if err != nil { - return err - } - - // check if we changed commits - if d.lastCommit != ref.Hash().String() { - // Notify upstream that we now have a new commit, and allow writing again - d.observeCommit(ref.Hash()) - } - - return nil -} - -func (d *gitDirectory) CheckoutNewBranch(branchName string) error { - // Make sure it's okay to write - if err := d.verifyWrite(); err != nil { - return err - } - - return d.wt.Checkout(&git.CheckoutOptions{ - Branch: plumbing.NewBranchReferenceName(branchName), - Create: true, - }) -} - -func (d *gitDirectory) CheckoutMainBranch() error { - // Make sure it's okay to write - if err := d.verifyWrite(); err != nil { - return err - } - - // Best-effort clean - _ = d.wt.Clean(&git.CleanOptions{ - Dir: true, - }) - // Force-checkout the main branch - return d.wt.Checkout(&git.CheckoutOptions{ - Branch: plumbing.NewBranchReferenceName(d.Branch), - Force: true, - }) -} - -// observeCommit sets the lastCommit variable so that we know the latest state -func (d *gitDirectory) observeCommit(commit plumbing.Hash) { - d.lastCommit = commit.String() - d.commitChan <- commit.String() - log.Infof("New commit observed on branch %q: %s", d.Branch, commit) -} - -// Commit creates a commit of all changes in the current worktree with the given parameters. -// It also automatically pushes the branch after the commit. -// ErrNotStarted is returned if the repo hasn't been cloned yet. -// ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided. 
-func (d *gitDirectory) Commit(ctx context.Context, authorName, authorEmail, msg string) error { - // Make sure it's okay to write - if err := d.verifyWrite(); err != nil { - return err - } - - s, err := d.wt.Status() - if err != nil { - return fmt.Errorf("git status failed: %v", err) - } - if s.IsClean() { - log.Debugf("No changed files in git repo, nothing to commit...") - return nil - } - - // Do a commit and push - log.Debug("commitLoop: Committing all local changes") - hash, err := d.wt.Commit(msg, &git.CommitOptions{ - All: true, - Author: &object.Signature{ - Name: authorName, - Email: authorEmail, - When: time.Now(), - }, - }) - if err != nil { - return fmt.Errorf("git commit error: %v", err) - } - - // Perform the git push operation using the timeout - err = d.contextWithTimeout(ctx, func(innerCtx context.Context) error { - log.Debug("commitLoop: Will push with timeout") - return d.repo.PushContext(innerCtx, &git.PushOptions{ - Auth: d.AuthMethod, - }) - }) - // Handle errors - switch err { - case nil, git.NoErrAlreadyUpToDate: - // no-op, just continue. Allow the git.NoErrAlreadyUpToDate error - case context.DeadlineExceeded: - return fmt.Errorf("git push operation took longer than deadline %s", d.Timeout) - case context.Canceled: - log.Tracef("context was cancelled") - return nil // if Cleanup() was called, just exit the goroutine - default: - return fmt.Errorf("failed to push: %v", err) - } - - // Notify upstream that we now have a new commit, and allow writing again - log.Infof("A new commit with the actual state has been created and pushed to the origin: %q", hash) - d.observeCommit(hash) - return nil -} - -func (d *gitDirectory) contextWithTimeout(ctx context.Context, fn func(context.Context) error) error { - // Create a new context with a timeout. The push operation either succeeds in time, times out, - // or is cancelled by Cleanup(). In case of a successful run, the context is always cancelled afterwards. 
- ctx, cancel := context.WithTimeout(ctx, d.Timeout) - defer cancel() - - // Run the function using the context and cancel directly afterwards - fnErr := fn(ctx) - - // Return the context error, if any, first so deadline/cancel signals can propagate. - // Otherwise passthrough the error returned from the function. - if ctx.Err() != nil { - log.Debugf("operation context yielded error %v to be returned. Function error was: %v", ctx.Err(), fnErr) - return ctx.Err() - } - return fnErr -} - -// Cleanup cancels running goroutines and operations, and removes the temporary clone directory -func (d *gitDirectory) Cleanup() error { - // Cancel the context for the two running goroutines, and any possible long-running operations - d.cancel() - - // Remove the temporary directory - if err := os.RemoveAll(d.Dir()); err != nil { - log.Errorf("Failed to clean up temp git directory: %v", err) - return err - } - return nil -} diff --git a/pkg/storage/client/transactional/distributed/git/git.go b/pkg/storage/client/transactional/distributed/git/git.go new file mode 100644 index 00000000..53cf157f --- /dev/null +++ b/pkg/storage/client/transactional/distributed/git/git.go @@ -0,0 +1,368 @@ +package git + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "os" + "sync" + "time" + + "github.com/fluxcd/go-git-providers/gitprovider" + git "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" + log "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed" +) + +var ( + // ErrNotStarted happens if you try to operate on the LocalClone before you have started + // it with StartCheckoutLoop. + ErrNotStarted = errors.New("the LocalClone hasn't been started (and hence, cloned) yet") + // ErrCannotWriteToReadOnly happens if you try to do a write operation for a non-authenticated Git repo. 
+ ErrCannotWriteToReadOnly = errors.New("the LocalClone is read-only, cannot write") +) + +const ( + defaultBranch = "master" +) + +// LocalCloneOptions provides options for the LocalClone. +// TODO: Refactor this into the controller-runtime Options factory pattern. +type LocalCloneOptions struct { + Branch string // default "master" + + // Authentication method. If unspecified, this clone is read-only. + AuthMethod AuthMethod +} + +func (o *LocalCloneOptions) Default() { + if o.Branch == "" { + o.Branch = defaultBranch + } +} + +// LocalClone is an implementation of both a Remote, and a BranchManager, for Git. +var _ transactional.BranchManager = &LocalClone{} +var _ distributed.Remote = &LocalClone{} + +// Create a new Remote and BranchManager implementation using Git. The repo is cloned immediately +// in the constructor, you can use ctx to enforce a timeout for the clone. +func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts LocalCloneOptions) (*LocalClone, error) { + log.Info("Initializing the Git repo...") + + // Default the options + opts.Default() + + // Create a temporary directory for the clone + tmpDir, err := ioutil.TempDir("", "libgitops") + if err != nil { + return nil, err + } + log.Debugf("Created temporary directory for the git clone at %q", tmpDir) + + d := &LocalClone{ + repoRef: repoRef, + opts: opts, + cloneDir: tmpDir, + lock: &sync.Mutex{}, + commitHooks: &transactional.MultiCommitHook{}, + txHooks: &transactional.MultiTransactionHook{}, + } + + log.Trace("URL endpoint parsed and authentication method chosen") + + if d.canWrite() { + log.Infof("Running in read-write mode, will commit back current status to the repo") + } else { + log.Infof("Running in read-only mode, won't write status back to the repo") + } + + // Clone the repo + if err := d.clone(ctx); err != nil { + return nil, err + } + + return d, nil +} + +// LocalClone is an implementation of both a Remote, and a BranchManager, for Git. 
+type LocalClone struct { + // user-specified options + repoRef gitprovider.RepositoryRef + opts LocalCloneOptions + + // the temporary directory used for the clone + cloneDir string + + // go-git objects. wt is the worktree of the repo, persistent during the lifetime of repo. + repo *git.Repository + wt *git.Worktree + + // the lock for git operations (so no ops are done simultaneously) + lock *sync.Mutex + + commitHooks transactional.CommitHookChain + txHooks transactional.TransactionHookChain +} + +func (d *LocalClone) CommitHookChain() transactional.CommitHookChain { + return d.commitHooks +} + +func (d *LocalClone) TransactionHookChain() transactional.TransactionHookChain { + return d.txHooks +} + +func (d *LocalClone) Dir() string { + return d.cloneDir +} + +func (d *LocalClone) MainBranch() string { + return d.opts.Branch +} + +func (d *LocalClone) RepositoryRef() gitprovider.RepositoryRef { + return d.repoRef +} + +func (d *LocalClone) canWrite() bool { + return d.opts.AuthMethod != nil +} + +// verifyRead makes sure it's ok to start a read-something-from-git process +func (d *LocalClone) verifyRead() error { + // Safeguard against not starting yet + if d.wt == nil { + return fmt.Errorf("cannot pull: %w", ErrNotStarted) + } + return nil +} + +// verifyWrite makes sure it's ok to start a write-something-to-git process +func (d *LocalClone) verifyWrite() error { + // We need all read privileges first + if err := d.verifyRead(); err != nil { + return err + } + // Make sure we don't write to a possibly read-only repo + if !d.canWrite() { + return ErrCannotWriteToReadOnly + } + return nil +} + +func (d *LocalClone) clone(ctx context.Context) error { + // Lock the mutex now that we're starting, and unlock it when exiting + d.lock.Lock() + defer d.lock.Unlock() + + cloneURL := d.repoRef.GetCloneURL(d.opts.AuthMethod.TransportType()) + + log.Infof("Starting to clone the repository %s", d.repoRef) + // Do a clone operation to the temporary directory + var err error 
+ d.repo, err = git.PlainCloneContext(ctx, d.Dir(), false, &git.CloneOptions{ + URL: cloneURL, + Auth: d.opts.AuthMethod, + ReferenceName: plumbing.NewBranchReferenceName(d.opts.Branch), + SingleBranch: true, + NoCheckout: false, + //Depth: 1, // ref: https://github.com/src-d/go-git/issues/1143 + RecurseSubmodules: 0, + Progress: nil, + Tags: git.NoTags, + }) + // Handle errors + if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("git clone operation timed out: %w", err) + } else if errors.Is(err, context.Canceled) { + return fmt.Errorf("git clone was cancelled: %w", err) + } else if err != nil { + return fmt.Errorf("git clone error: %v", err) + } + + // Populate the worktree pointer + d.wt, err = d.repo.Worktree() + if err != nil { + return fmt.Errorf("git get worktree error: %v", err) + } + + // Get the latest HEAD commit and report it to the user + ref, err := d.repo.Head() + if err != nil { + return err + } + + log.Infof("Repo cloned; HEAD commit is %s", ref.Hash()) + return nil +} + +func (d *LocalClone) Pull(ctx context.Context) error { + // Lock the mutex now that we're starting, and unlock it when exiting + d.lock.Lock() + defer d.lock.Unlock() + + // TODO: This should support doing Fetch() only maybe + // TODO: Remove the requirement to actually be on the branch + // that is being pulled. + + // Make sure it's okay to read + if err := d.verifyRead(); err != nil { + return err + } + + // Perform the git pull operation. 
The context carries a timeout + log.Trace("Starting pull operation") + err := d.wt.PullContext(ctx, &git.PullOptions{ + Auth: d.opts.AuthMethod, + SingleBranch: true, + }) + + // Handle errors + if errors.Is(err, git.NoErrAlreadyUpToDate) { + // all good, nothing more to do + log.Trace("Pull already up-to-date") + return nil + } else if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("git pull operation timed out: %w", err) + } else if errors.Is(err, context.Canceled) { + return fmt.Errorf("git pull was cancelled: %w", err) + } else if err != nil { + return fmt.Errorf("git pull error: %v", err) + } + + log.Trace("Pulled successfully") + + // Get current HEAD + ref, err := d.repo.Head() + if err != nil { + return err + } + + log.Infof("New commit observed %s", ref.Hash()) + return nil +} + +func (d *LocalClone) Push(ctx context.Context) error { + // TODO: Push a specific branch only. Use opts.RefSpecs? + + // Perform the git push operation. The context carries a timeout + log.Debug("Starting push operation") + err := d.repo.PushContext(ctx, &git.PushOptions{ + Auth: d.opts.AuthMethod, + }) + + // Handle errors + if errors.Is(err, git.NoErrAlreadyUpToDate) { + // TODO: Is it good if there's nothing more to do; or a failure if there's nothing to push? + log.Trace("Push already up-to-date") + return nil + } else if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("git push operation timed out: %w", err) + } else if errors.Is(err, context.Canceled) { + return fmt.Errorf("git push was cancelled: %w", err) + } else if err != nil { + return fmt.Errorf("git push error: %v", err) + } + + log.Trace("Pushed successfully") + + return nil +} + +func (d *LocalClone) CreateBranch(_ context.Context, branch string) error { + // Lock the mutex now that we're starting, and unlock it when exiting + d.lock.Lock() + defer d.lock.Unlock() + + // TODO: Should the caller do a force-reset using ResetToCleanBranch before creating the branch? 
+ + // Make sure it's okay to write + if err := d.verifyWrite(); err != nil { + return err + } + + return d.wt.Checkout(&git.CheckoutOptions{ + Branch: plumbing.NewBranchReferenceName(branch), + Create: true, + }) +} + +func (d *LocalClone) ResetToCleanBranch(_ context.Context, branch string) error { + // Lock the mutex now that we're starting, and unlock it when exiting + d.lock.Lock() + defer d.lock.Unlock() + + // Make sure it's okay to write + if err := d.verifyWrite(); err != nil { + return err + } + + // Best-effort clean + _ = d.wt.Clean(&git.CleanOptions{ + Dir: true, + }) + // Force-checkout the main branch + return d.wt.Checkout(&git.CheckoutOptions{ + Branch: plumbing.NewBranchReferenceName(branch), + Force: true, + }) + // TODO: Do a pull here too? +} + +// Commit creates a commit of all changes in the current worktree with the given parameters. +// It also automatically pushes the branch after the commit. +// ErrNotStarted is returned if the repo hasn't been cloned yet. +// ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided. +func (d *LocalClone) Commit(ctx context.Context, commit transactional.Commit) error { + // Lock the mutex now that we're starting, and unlock it when exiting + d.lock.Lock() + defer d.lock.Unlock() + + // Make sure it's okay to write + if err := d.verifyWrite(); err != nil { + return err + } + + s, err := d.wt.Status() + if err != nil { + return fmt.Errorf("git status failed: %v", err) + } + if s.IsClean() { + log.Debugf("No changed files in git repo, nothing to commit...") + // TODO: Should this be an error instead? 
+ return nil + } + + // Do a commit + log.Debug("Committing all local changes") + hash, err := d.wt.Commit(commit.GetMessage().String(), &git.CommitOptions{ + All: true, + Author: &object.Signature{ + Name: commit.GetAuthor().GetName(), + Email: commit.GetAuthor().GetEmail(), + When: time.Now(), + }, + }) + if err != nil { + return fmt.Errorf("git commit error: %v", err) + } + + // Notify upstream that we now have a new commit, and allow writing again + log.Infof("A new commit has been created: %q", hash) + return nil +} + +// Cleanup cancels running goroutines and operations, and removes the temporary clone directory +func (d *LocalClone) Cleanup() error { + // Remove the temporary directory + if err := os.RemoveAll(d.Dir()); err != nil { + log.Errorf("Failed to clean up temp git directory: %v", err) + return err + } + return nil +} diff --git a/pkg/gitdir/transport.go b/pkg/storage/client/transactional/distributed/git/transport.go similarity index 99% rename from pkg/gitdir/transport.go rename to pkg/storage/client/transactional/distributed/git/transport.go index 408c0b2d..3017853a 100644 --- a/pkg/gitdir/transport.go +++ b/pkg/storage/client/transactional/distributed/git/transport.go @@ -1,4 +1,4 @@ -package gitdir +package git import ( "errors" From d3894bacb5cc96a5f39bfc81606037ef4f6a8add Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:34:54 +0200 Subject: [PATCH 078/149] Move a part of the PullRequest structs to the Github PR package; and make the GitHub PR functionality a hook after the branch has been pushed. 
--- .../distributed/git/github/github.go | 182 ++++++++++++++++++ pkg/storage/transaction/pullrequest.go | 130 ------------- .../transaction/pullrequest/github/github.go | 119 ------------ 3 files changed, 182 insertions(+), 249 deletions(-) create mode 100644 pkg/storage/client/transactional/distributed/git/github/github.go delete mode 100644 pkg/storage/transaction/pullrequest.go delete mode 100644 pkg/storage/transaction/pullrequest/github/github.go diff --git a/pkg/storage/client/transactional/distributed/git/github/github.go b/pkg/storage/client/transactional/distributed/git/github/github.go new file mode 100644 index 00000000..23a20128 --- /dev/null +++ b/pkg/storage/client/transactional/distributed/git/github/github.go @@ -0,0 +1,182 @@ +package github + +import ( + "context" + "errors" + "fmt" + + "github.com/fluxcd/go-git-providers/github" + "github.com/fluxcd/go-git-providers/gitprovider" + "github.com/fluxcd/go-git-providers/validation" + gogithub "github.com/google/go-github/v32/github" + "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional" +) + +// PullRequest can be returned from a TransactionFunc instead of a CommitResult, if +// a PullRequest is desired to be created by the PullRequestProvider. +type PullRequest interface { + // PullRequestResult is a superset of CommitResult + transactional.Commit + + // GetLabels specifies what labels should be applied on the PR. + // +optional + GetLabels() []string + // GetAssignees specifies what user login names should be assigned to this PR. + // Note: Only users with "pull" access or more can be assigned. + // +optional + GetAssignees() []string + // GetMilestone specifies what milestone this should be attached to. + // +optional + GetMilestone() string +} + +// GenericPullRequest implements PullRequest. +var _ PullRequest = GenericPullRequest{} + +// GenericPullRequest implements PullRequest. 
+type GenericPullRequest struct { + // GenericPullRequest is a superset of a Commit. + transactional.Commit + + // Labels specifies what labels should be applied on the PR. + // +optional + Labels []string + // Assignees specifies what user login names should be assigned to this PR. + // Note: Only users with "pull" access or more can be assigned. + // +optional + Assignees []string + // Milestone specifies what milestone this should be attached to. + // +optional + Milestone string +} + +func (r GenericPullRequest) GetLabels() []string { return r.Labels } +func (r GenericPullRequest) GetAssignees() []string { return r.Assignees } +func (r GenericPullRequest) GetMilestone() string { return r.Milestone } + +func (r GenericPullRequest) Validate() error { + v := validation.New("GenericPullRequest") + // Just validate the "inner" object + v.Append(r.Commit.Validate(), r.Commit, "Commit") + return v.Error() +} + +// TODO: This package should really only depend on go-git-providers' abstraction interface + +var ErrProviderNotSupported = errors.New("only the Github go-git-providers provider is supported at the moment") + +// NewGitHubPRCommitHandler returns a new transactional.CommitHandler from a gitprovider.Client. 
+func NewGitHubPRCommitHandler(c gitprovider.Client, repoRef gitprovider.RepositoryRef) (transactional.CommitHook, error) { + // Make sure a Github client was passed + if c.ProviderID() != github.ProviderID { + return nil, ErrProviderNotSupported + } + return &prCreator{c, repoRef}, nil +} + +type prCreator struct { + c gitprovider.Client + repoRef gitprovider.RepositoryRef +} + +func (c *prCreator) PreCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error { + return nil +} + +func (c *prCreator) PostCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error { + // First, validate the input + if err := commit.Validate(); err != nil { + return fmt.Errorf("given transactional.Commit wasn't valid") + } + + prCommit, ok := commit.(PullRequest) + if !ok { + return nil + } + + // Use the "raw" go-github client to do this + ghClient := c.c.Raw().(*gogithub.Client) + + // Helper variables + owner := c.repoRef.GetIdentity() + repo := c.repoRef.GetRepository() + var body *string + if commit.GetMessage().GetDescription() != "" { + body = gogithub.String(commit.GetMessage().GetDescription()) + } + + // Create the Pull Request + prPayload := &gogithub.NewPullRequest{ + Head: gogithub.String(info.Head), + Base: gogithub.String(info.Base), + Title: gogithub.String(commit.GetMessage().GetTitle()), + Body: body, + } + logrus.Infof("GitHub PR payload: %+v", prPayload) + pr, _, err := ghClient.PullRequests.Create(ctx, owner, repo, prPayload) + if err != nil { + return err + } + + // If spec.GetMilestone() is set, fetch the ID of the milestone + // Only set milestoneID to non-nil if specified + var milestoneID *int + if len(prCommit.GetMilestone()) != 0 { + milestoneID, err = getMilestoneID(ctx, ghClient, owner, repo, prCommit.GetMilestone()) + if err != nil { + return err + } + } + + // Only set assignees to non-nil if specified + var assignees *[]string + if a := prCommit.GetAssignees(); len(a) != 0 { + 
assignees = &a + } + + // Only set labels to non-nil if specified + var labels *[]string + if l := prCommit.GetLabels(); len(l) != 0 { + labels = &l + } + + // Only PATCH the PR if any of the fields were set + if milestoneID != nil || assignees != nil || labels != nil { + _, _, err := ghClient.Issues.Edit(ctx, owner, repo, pr.GetNumber(), &gogithub.IssueRequest{ + Milestone: milestoneID, + Assignees: assignees, + Labels: labels, + }) + if err != nil { + return err + } + } + + return nil +} + +func getMilestoneID(ctx context.Context, c *gogithub.Client, owner, repo, milestoneName string) (*int, error) { + // List all milestones in the repo + // TODO: This could/should use pagination + milestones, _, err := c.Issues.ListMilestones(ctx, owner, repo, &gogithub.MilestoneListOptions{ + State: "all", + }) + if err != nil { + return nil, err + } + // Loop through all milestones, search for one with the right name + for _, milestone := range milestones { + // Only consider a milestone with the right name + if milestone.GetTitle() != milestoneName { + continue + } + // Validate nil to avoid panics + if milestone.Number == nil { + return nil, fmt.Errorf("didn't expect milestone Number to be nil: %v", milestone) + } + // Return the Milestone number + return milestone.Number, nil + } + return nil, fmt.Errorf("couldn't find milestone with name: %s", milestoneName) +} diff --git a/pkg/storage/transaction/pullrequest.go b/pkg/storage/transaction/pullrequest.go deleted file mode 100644 index bf0fcf23..00000000 --- a/pkg/storage/transaction/pullrequest.go +++ /dev/null @@ -1,130 +0,0 @@ -package transaction - -import ( - "context" - - "github.com/fluxcd/go-git-providers/gitprovider" - "github.com/fluxcd/go-git-providers/validation" -) - -// PullRequestResult can be returned from a TransactionFunc instead of a CommitResult, if -// a PullRequest is desired to be created by the PullRequestProvider. 
-type PullRequestResult interface { - // PullRequestResult is a superset of CommitResult - CommitResult - - // GetLabels specifies what labels should be applied on the PR. - // +optional - GetLabels() []string - // GetAssignees specifies what user login names should be assigned to this PR. - // Note: Only users with "pull" access or more can be assigned. - // +optional - GetAssignees() []string - // GetMilestone specifies what milestone this should be attached to. - // +optional - GetMilestone() string -} - -// GenericPullRequestResult implements PullRequestResult. -var _ PullRequestResult = &GenericPullRequestResult{} - -// GenericPullRequestResult implements PullRequestResult. -type GenericPullRequestResult struct { - // GenericPullRequestResult is a superset of a CommitResult. - CommitResult - - // Labels specifies what labels should be applied on the PR. - // +optional - Labels []string - // Assignees specifies what user login names should be assigned to this PR. - // Note: Only users with "pull" access or more can be assigned. - // +optional - Assignees []string - // Milestone specifies what milestone this should be attached to. - // +optional - Milestone string -} - -func (r *GenericPullRequestResult) GetLabels() []string { - return r.Labels -} -func (r *GenericPullRequestResult) GetAssignees() []string { - return r.Assignees -} -func (r *GenericPullRequestResult) GetMilestone() string { - return r.Milestone -} -func (r *GenericPullRequestResult) Validate() error { - v := validation.New("GenericPullRequestResult") - // Just validate the "inner" object - v.Append(r.CommitResult.Validate(), r.CommitResult, "CommitResult") - return v.Error() -} - -// PullRequestSpec is the messaging interface between the TransactionStorage, and the -// PullRequestProvider. The PullRequestSpec contains all the needed information for creating -// a Pull Request successfully. -type PullRequestSpec interface { - // PullRequestSpec is a superset of PullRequestResult. 
- PullRequestResult - - // GetMainBranch returns the main branch of the repository. - // +required - GetMainBranch() string - // GetMergeBranch returns the branch that is pending to be merged into main with this PR. - // +required - GetMergeBranch() string - // GetMergeBranch returns the branch that is pending to be merged into main with this PR. - // +required - GetRepositoryRef() gitprovider.RepositoryRef -} - -// GenericPullRequestSpec implements PullRequestSpec. -type GenericPullRequestSpec struct { - // GenericPullRequestSpec is a superset of PullRequestResult. - PullRequestResult - - // MainBranch returns the main branch of the repository. - // +required - MainBranch string - // MergeBranch returns the branch that is pending to be merged into main with this PR. - // +required - MergeBranch string - // RepositoryRef returns the branch that is pending to be merged into main with this PR. - // +required - RepositoryRef gitprovider.RepositoryRef -} - -func (r *GenericPullRequestSpec) GetMainBranch() string { - return r.MainBranch -} -func (r *GenericPullRequestSpec) GetMergeBranch() string { - return r.MergeBranch -} -func (r *GenericPullRequestSpec) GetRepositoryRef() gitprovider.RepositoryRef { - return r.RepositoryRef -} -func (r *GenericPullRequestSpec) Validate() error { - v := validation.New("GenericPullRequestSpec") - // Just validate the "inner" object - v.Append(r.PullRequestResult.Validate(), r.PullRequestResult, "PullRequestResult") - - if len(r.MainBranch) == 0 { - v.Required("MainBranch") - } - if len(r.MergeBranch) == 0 { - v.Required("MergeBranch") - } - if r.RepositoryRef == nil { - v.Required("RepositoryRef") - } - return v.Error() -} - -// PullRequestProvider is an interface for providers that can create so-called "Pull Requests", -// as popularized by Git. A Pull Request is a formal ask for a branch to be merged into the main one. -// It can be UI-based, as in GitHub and GitLab, or it can be using some other method. 
-type PullRequestProvider interface { - // CreatePullRequest creates a Pull Request using the given specification. - CreatePullRequest(ctx context.Context, spec PullRequestSpec) error -} diff --git a/pkg/storage/transaction/pullrequest/github/github.go b/pkg/storage/transaction/pullrequest/github/github.go deleted file mode 100644 index d8efbd65..00000000 --- a/pkg/storage/transaction/pullrequest/github/github.go +++ /dev/null @@ -1,119 +0,0 @@ -package github - -import ( - "context" - "errors" - "fmt" - - "github.com/fluxcd/go-git-providers/github" - "github.com/fluxcd/go-git-providers/gitprovider" - gogithub "github.com/google/go-github/v32/github" - "github.com/weaveworks/libgitops/pkg/storage/transaction" -) - -// TODO: This package should really only depend on go-git-providers' abstraction interface - -var ErrProviderNotSupported = errors.New("only the Github go-git-providers provider is supported at the moment") - -// NewGitHubPRProvider returns a new transaction.PullRequestProvider from a gitprovider.Client. 
-func NewGitHubPRProvider(c gitprovider.Client) (transaction.PullRequestProvider, error) { - // Make sure a Github client was passed - if c.ProviderID() != github.ProviderID { - return nil, ErrProviderNotSupported - } - return &prCreator{c}, nil -} - -type prCreator struct { - c gitprovider.Client -} - -func (c *prCreator) CreatePullRequest(ctx context.Context, spec transaction.PullRequestSpec) error { - // First, validate the input - if err := spec.Validate(); err != nil { - return fmt.Errorf("given PullRequestSpec wasn't valid") - } - - // Use the "raw" go-github client to do this - ghClient := c.c.Raw().(*gogithub.Client) - - // Helper variables - owner := spec.GetRepositoryRef().GetIdentity() - repo := spec.GetRepositoryRef().GetRepository() - var body *string - if spec.GetDescription() != "" { - body = gogithub.String(spec.GetDescription()) - } - - // Create the Pull Request - pr, _, err := ghClient.PullRequests.Create(ctx, owner, repo, &gogithub.NewPullRequest{ - Head: gogithub.String(spec.GetMergeBranch()), - Base: gogithub.String(spec.GetMainBranch()), - Title: gogithub.String(spec.GetTitle()), - Body: body, - }) - if err != nil { - return err - } - - // If spec.GetMilestone() is set, fetch the ID of the milestone - // Only set milestoneID to non-nil if specified - var milestoneID *int - if len(spec.GetMilestone()) != 0 { - milestoneID, err = getMilestoneID(ctx, ghClient, owner, repo, spec.GetMilestone()) - if err != nil { - return err - } - } - - // Only set assignees to non-nil if specified - var assignees *[]string - if a := spec.GetAssignees(); len(a) != 0 { - assignees = &a - } - - // Only set labels to non-nil if specified - var labels *[]string - if l := spec.GetLabels(); len(l) != 0 { - labels = &l - } - - // Only PATCH the PR if any of the fields were set - if milestoneID != nil || assignees != nil || labels != nil { - _, _, err := ghClient.Issues.Edit(ctx, owner, repo, pr.GetNumber(), &gogithub.IssueRequest{ - Milestone: milestoneID, - Assignees: 
assignees, - Labels: labels, - }) - if err != nil { - return err - } - } - - return nil -} - -func getMilestoneID(ctx context.Context, c *gogithub.Client, owner, repo, milestoneName string) (*int, error) { - // List all milestones in the repo - // TODO: This could/should use pagination - milestones, _, err := c.Issues.ListMilestones(ctx, owner, repo, &gogithub.MilestoneListOptions{ - State: "all", - }) - if err != nil { - return nil, err - } - // Loop through all milestones, search for one with the right name - for _, milestone := range milestones { - // Only consider a milestone with the right name - if milestone.GetTitle() != milestoneName { - continue - } - // Validate nil to avoid panics - if milestone.Number == nil { - return nil, fmt.Errorf("didn't expect milestone Number to be nil: %v", milestone) - } - // Return the Milestone number - return milestone.Number, nil - } - return nil, fmt.Errorf("couldn't find milestone with name: %s", milestoneName) -} From b514e580f608f885c56bca02d3a9ea16b9662349 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:35:23 +0200 Subject: [PATCH 079/149] Remove the old transaction code --- pkg/storage/transaction/commit.go | 79 -------------- pkg/storage/transaction/git.go | 161 ----------------------------- pkg/storage/transaction/storage.go | 28 ----- 3 files changed, 268 deletions(-) delete mode 100644 pkg/storage/transaction/commit.go delete mode 100644 pkg/storage/transaction/git.go delete mode 100644 pkg/storage/transaction/storage.go diff --git a/pkg/storage/transaction/commit.go b/pkg/storage/transaction/commit.go deleted file mode 100644 index 30e55ae0..00000000 --- a/pkg/storage/transaction/commit.go +++ /dev/null @@ -1,79 +0,0 @@ -package transaction - -import ( - "fmt" - - "github.com/fluxcd/go-git-providers/validation" -) - -// CommitResult describes a result of a transaction. 
-type CommitResult interface { - // GetAuthorName describes the author's name (as per git config) - // +required - GetAuthorName() string - // GetAuthorEmail describes the author's email (as per git config) - // +required - GetAuthorEmail() string - // GetTitle describes the change concisely, so it can be used as a commit message or PR title. - // +required - GetTitle() string - // GetDescription contains optional extra information about the change. - // +optional - GetDescription() string - - // GetMessage returns GetTitle() followed by a newline and GetDescription(), if set. - GetMessage() string - // Validate validates that all required fields are set, and given data is valid. - Validate() error -} - -// GenericCommitResult implements CommitResult. -var _ CommitResult = &GenericCommitResult{} - -// GenericCommitResult implements CommitResult. -type GenericCommitResult struct { - // AuthorName describes the author's name (as per git config) - // +required - AuthorName string - // AuthorEmail describes the author's email (as per git config) - // +required - AuthorEmail string - // Title describes the change concisely, so it can be used as a commit message or PR title. - // +required - Title string - // Description contains optional extra information about the change. 
- // +optional - Description string -} - -func (r *GenericCommitResult) GetAuthorName() string { - return r.AuthorName -} -func (r *GenericCommitResult) GetAuthorEmail() string { - return r.AuthorEmail -} -func (r *GenericCommitResult) GetTitle() string { - return r.Title -} -func (r *GenericCommitResult) GetDescription() string { - return r.Description -} -func (r *GenericCommitResult) GetMessage() string { - if len(r.Description) == 0 { - return r.Title - } - return fmt.Sprintf("%s\n%s", r.Title, r.Description) -} -func (r *GenericCommitResult) Validate() error { - v := validation.New("GenericCommitResult") - if len(r.AuthorName) == 0 { - v.Required("AuthorName") - } - if len(r.AuthorEmail) == 0 { - v.Required("AuthorEmail") - } - if len(r.Title) == 0 { - v.Required("Title") - } - return v.Error() -} diff --git a/pkg/storage/transaction/git.go b/pkg/storage/transaction/git.go deleted file mode 100644 index efc57ab3..00000000 --- a/pkg/storage/transaction/git.go +++ /dev/null @@ -1,161 +0,0 @@ -package transaction - -import ( - "context" - "fmt" - "strings" - - "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/gitdir" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/serializer" - "github.com/weaveworks/libgitops/pkg/storage" - "github.com/weaveworks/libgitops/pkg/util" - "github.com/weaveworks/libgitops/pkg/util/watcher" -) - -var excludeDirs = []string{".git"} - -func NewGitStorage(gitDir gitdir.GitDirectory, prProvider PullRequestProvider, ser serializer.Serializer) (TransactionStorage, error) { - // Make sure the repo is cloned. If this func has already been called, it will be a no-op. 
- if err := gitDir.StartCheckoutLoop(); err != nil { - return nil, err - } - - raw := storage.NewGenericMappedRawStorage(gitDir.Dir()) - s := storage.NewGenericStorage(raw, ser, []runtime.IdentifierFactory{runtime.Metav1NameIdentifier}) - - gitStorage := &GitStorage{ - ReadStorage: s, - s: s, - raw: raw, - gitDir: gitDir, - prProvider: prProvider, - } - // Do a first sync now, and then start the background loop - if err := gitStorage.sync(); err != nil { - return nil, err - } - gitStorage.syncLoop() - - return gitStorage, nil -} - -type GitStorage struct { - storage.ReadStorage - - s storage.Storage - raw storage.MappedRawStorage - gitDir gitdir.GitDirectory - prProvider PullRequestProvider -} - -func (s *GitStorage) syncLoop() { - go func() { - for { - if commit, ok := <-s.gitDir.CommitChannel(); ok { - logrus.Debugf("GitStorage: Got info about commit %q, syncing...", commit) - if err := s.sync(); err != nil { - logrus.Errorf("GitStorage: Got sync error: %v", err) - } - } - } - }() -} - -func (s *GitStorage) sync() error { - mappings, err := computeMappings(s.gitDir.Dir(), s.s) - if err != nil { - return err - } - logrus.Debugf("Rewriting the mappings to %v", mappings) - s.raw.SetMappings(mappings) - return nil -} - -func (s *GitStorage) Transaction(ctx context.Context, streamName string, fn TransactionFunc) error { - // Append random bytes to the end of the stream name if it ends with a dash - if strings.HasSuffix(streamName, "-") { - suffix, err := util.RandomSHA(4) - if err != nil { - return err - } - streamName += suffix - } - - // Make sure we have the latest available state - if err := s.gitDir.Pull(ctx); err != nil { - return err - } - // Make sure no other Git ops can take place during the transaction, wait for other ongoing operations. - s.gitDir.Suspend() - defer s.gitDir.Resume() - // Always switch back to the main branch afterwards. 
- // TODO ordering of the defers, and return deferred error - defer func() { _ = s.gitDir.CheckoutMainBranch() }() - - // Check out a new branch with the given name - if err := s.gitDir.CheckoutNewBranch(streamName); err != nil { - return err - } - // Invoke the transaction - result, err := fn(ctx, s.s) - if err != nil { - return err - } - // Make sure the result is valid - if err := result.Validate(); err != nil { - return fmt.Errorf("transaction result is not valid: %w", err) - } - // Perform the commit - if err := s.gitDir.Commit(ctx, result.GetAuthorName(), result.GetAuthorEmail(), result.GetMessage()); err != nil { - return err - } - // Return if no PR should be made - prResult, ok := result.(PullRequestResult) - if !ok { - return nil - } - // If a PR was asked for, and no provider was given, error out - if s.prProvider == nil { - return ErrNoPullRequestProvider - } - // Create the PR using the provider. - return s.prProvider.CreatePullRequest(ctx, &GenericPullRequestSpec{ - PullRequestResult: prResult, - MainBranch: s.gitDir.MainBranch(), - MergeBranch: streamName, - RepositoryRef: s.gitDir.RepositoryRef(), - }) -} - -func computeMappings(dir string, s storage.Storage) (map[storage.ObjectKey]string, error) { - validExts := make([]string, 0, len(storage.ContentTypes)) - for ext := range storage.ContentTypes { - validExts = append(validExts, ext) - } - - files, err := watcher.WalkDirectoryForFiles(dir, validExts, excludeDirs) - if err != nil { - return nil, err - } - - // TODO: Compute the difference between the earlier state, and implement EventStorage so the user - // can automatically subscribe to changes of objects between versions. 
- m := map[storage.ObjectKey]string{} - for _, file := range files { - partObjs, err := storage.DecodePartialObjects(serializer.FromFile(file), s.Serializer().Scheme(), false, nil) - if err != nil { - logrus.Errorf("couldn't decode %q into a partial object: %v", file, err) - continue - } - key, err := s.ObjectKeyFor(partObjs[0]) - if err != nil { - logrus.Errorf("couldn't get objectkey for partial object: %v", err) - continue - } - logrus.Debugf("Adding mapping between %s and %q", key, file) - m[key] = file - } - return m, nil -} diff --git a/pkg/storage/transaction/storage.go b/pkg/storage/transaction/storage.go deleted file mode 100644 index 8a60e93b..00000000 --- a/pkg/storage/transaction/storage.go +++ /dev/null @@ -1,28 +0,0 @@ -package transaction - -import ( - "context" - "errors" - - "github.com/weaveworks/libgitops/pkg/storage" -) - -var ( - ErrAbortTransaction = errors.New("transaction aborted") - ErrTransactionActive = errors.New("transaction is active") - ErrNoPullRequestProvider = errors.New("no pull request provider given") -) - -type TransactionFunc func(ctx context.Context, s storage.Storage) (CommitResult, error) - -type TransactionStorage interface { - storage.ReadStorage - - // Transaction creates a new "stream" (for Git: branch) with the given name, or - // prefix if streamName ends with a dash (in that case, a 8-char hash will be appended). - // The environment is made sure to be as up-to-date as possible before fn executes. When - // fn executes, the given storage can be used to modify the desired state. If you want to - // "commit" the changes made in fn, just return nil. If you want to abort, return ErrAbortTransaction. 
- // If you want to - Transaction(ctx context.Context, streamName string, fn TransactionFunc) error -} From d64a0e61563fb9d3128f9626729bcbe5a7804aff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:35:54 +0200 Subject: [PATCH 080/149] Remove the old cache implementation; not needed anymore. --- pkg/storage/cache/cache.go | 197 ------------------------------------ pkg/storage/cache/index.go | 156 ---------------------------- pkg/storage/cache/object.go | 96 ------------------ 3 files changed, 449 deletions(-) delete mode 100644 pkg/storage/cache/cache.go delete mode 100644 pkg/storage/cache/index.go delete mode 100644 pkg/storage/cache/object.go diff --git a/pkg/storage/cache/cache.go b/pkg/storage/cache/cache.go deleted file mode 100644 index 11a4991b..00000000 --- a/pkg/storage/cache/cache.go +++ /dev/null @@ -1,197 +0,0 @@ -package cache - -/* - -TODO: Revisit if we need this file/package in the future. - -import ( - log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/serializer" - "github.com/weaveworks/libgitops/pkg/storage" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// Cache is an intermediate caching layer, which conforms to Storage -// Typically you back the cache with an actual storage -type Cache interface { - storage.Storage - // Flush is used to write the state of the entire cache to storage - // Warning: this is a very expensive operation - Flush() error -} - -type cache struct { - // storage is the backing Storage for the cache - // used to look up non-cached Objects - storage storage.Storage - - // index caches the Objects by GroupVersionKind and UID - // This guarantees uniqueness when looking up a specific Object - index *index -} - -var _ Cache = &cache{} - -func NewCache(backingStorage storage.Storage) Cache { - c := &cache{ - storage: backingStorage, - index: newIndex(backingStorage), - } - - return c -} - -func (s *cache) 
Serializer() serializer.Serializer { - return s.storage.Serializer() -} - -func (c *cache) New(gvk schema.GroupVersionKind) (runtime.Object, error) { - // Request the storage to create the Object. The - // newly generated Object has not got an UID which - // is required for indexing, so just return it - // without storing it into the cache - return c.storage.New(gvk) -} - -func (c *cache) Get(gvk schema.GroupVersionKind, uid runtime.UID) (obj runtime.Object, err error) { - log.Tracef("cache: Get %s with UID %q", gvk.Kind, uid) - - // If the requested Object resides in the cache, return it - if obj, err = c.index.loadByID(gvk, uid); err != nil || obj != nil { - return - } - - // Request the Object from the storage - obj, err = c.storage.Get(gvk, uid) - - // If no errors occurred, cache it - if err == nil { - err = c.index.store(obj) - } - - return -} - -func (c *cache) GetMeta(gvk schema.GroupVersionKind, uid runtime.UID) (obj runtime.Object, err error) { - log.Tracef("cache: GetMeta %s with UID %q", gvk.Kind, uid) - - obj, err = c.storage.GetMeta(gvk, uid) - - // If no errors occurred while loading, store the Object in the cache - if err == nil { - err = c.index.storeMeta(obj) - } - - return -} - -func (c *cache) Set(gvk schema.GroupVersionKind, obj runtime.Object) error { - log.Tracef("cache: Set %s with UID %q", gvk.Kind, obj.GetUID()) - - // Store the changed Object in the cache - if err := c.index.store(obj); err != nil { - return err - } - - // TODO: For now the cache always flushes, we might add automatic flushing later - return c.storage.Set(gvk, obj) -} - -func (c *cache) Patch(gvk schema.GroupVersionKind, uid runtime.UID, patch []byte) error { - // TODO: For now patches are always flushed, the cache will load the updated Object on-demand on access - return c.storage.Patch(gvk, uid, patch) -} - -func (c *cache) Delete(gvk schema.GroupVersionKind, uid runtime.UID) error { - log.Tracef("cache: Delete %s with UID %q", gvk.Kind, uid) - - // Delete the given 
Object from the cache and storage - c.index.delete(gvk, uid) - return c.storage.Delete(gvk, uid) -} - -type listFunc func(gvk schema.GroupVersionKind) ([]runtime.Object, error) -type cacheStoreFunc func([]runtime.Object) error - -// list is a common handler for List and ListMeta -func (c *cache) list(gvk schema.GroupVersionKind, slf, clf listFunc, csf cacheStoreFunc) (objs []runtime.Object, err error) { - var storageCount uint64 - if storageCount, err = c.storage.Count(gvk); err != nil { - return - } - - if c.index.count(gvk) != storageCount { - log.Tracef("cache: miss when listing: %s", gvk) - // If the cache doesn't track all of the Objects, request them from the storage - if objs, err = slf(gvk); err != nil { - // If no errors occurred, store the Objects in the cache - err = csf(objs) - } - } else { - log.Tracef("cache: hit when listing: %s", gvk) - // If the cache tracks everything, return the cache's contents - objs, err = clf(gvk) - } - - return -} - -func (c *cache) List(gvk schema.GroupVersionKind) ([]runtime.Object, error) { - return c.list(gvk, c.storage.List, c.index.list, c.index.storeAll) -} - -func (c *cache) ListMeta(gvk schema.GroupVersionKind) ([]runtime.Object, error) { - return c.list(gvk, c.storage.ListMeta, c.index.listMeta, c.index.storeAllMeta) -} - -func (c *cache) Count(gvk schema.GroupVersionKind) (uint64, error) { - // The cache is transparent about how many items it has cached - return c.storage.Count(gvk) -} - -func (c *cache) Checksum(gvk schema.GroupVersionKind, uid runtime.UID) (string, error) { - // The cache is transparent about the checksums - return c.storage.Checksum(gvk, uid) -} - -func (c *cache) RawStorage() storage.RawStorage { - return c.storage.RawStorage() -} - -func (c *cache) Close() error { - return c.storage.Close() -} - -func (c *cache) Flush() error { - // Load the entire cache - allObjects, err := c.index.loadAll() - if err != nil { - return err - } - - for _, obj := range allObjects { - // Request the storage to 
save each Object - if err := c.storage.Set(obj); err != nil { - return err - } - } - - return nil -} - -// PartialObjectFrom is used to create a bound PartialObjectImpl from an Object. -// Note: This might be useful later (maybe here or maybe in pkg/runtime) if re-enable the cache -func PartialObjectFrom(obj Object) (PartialObject, error) { - tm, ok := obj.GetObjectKind().(*metav1.TypeMeta) - if !ok { - return nil, fmt.Errorf("PartialObjectFrom: Cannot cast obj to *metav1.TypeMeta, is %T", obj.GetObjectKind()) - } - om, ok := obj.GetObjectMeta().(*metav1.ObjectMeta) - if !ok { - return nil, fmt.Errorf("PartialObjectFrom: Cannot cast obj to *metav1.ObjectMeta, is %T", obj.GetObjectMeta()) - } - return &PartialObjectImpl{tm, om}, nil -} - -*/ diff --git a/pkg/storage/cache/index.go b/pkg/storage/cache/index.go deleted file mode 100644 index 326014f3..00000000 --- a/pkg/storage/cache/index.go +++ /dev/null @@ -1,156 +0,0 @@ -package cache - -/* - -TODO: Revisit if we need this file/package in the future. 
- -import ( - log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/storage" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type index struct { - storage storage.Storage - objects map[schema.GroupVersionKind]map[runtime.UID]*cacheObject -} - -func newIndex(storage storage.Storage) *index { - return &index{ - storage: storage, - objects: make(map[schema.GroupVersionKind]map[runtime.UID]*cacheObject), - } -} - -func (i *index) loadByID(gvk schema.GroupVersionKind, uid runtime.UID) (runtime.Object, error) { - if uids, ok := i.objects[gvk]; ok { - if obj, ok := uids[uid]; ok { - log.Tracef("index: cache hit for %s with UID %q", gvk.Kind, uid) - return obj.loadFull() - } - } - - log.Tracef("index: cache miss for %s with UID %q", gvk.Kind, uid) - return nil, nil -} - -func (i *index) loadAll() ([]runtime.Object, error) { - var size uint64 - - for gvk := range i.objects { - size += i.count(gvk) - } - - all := make([]runtime.Object, 0, size) - - for gvk := range i.objects { - if objects, err := i.list(gvk); err == nil { - all = append(all, objects...) - } else { - return nil, err - } - } - - return all, nil -} - -func store(i *index, obj runtime.Object, apiType bool) error { - // If store is called for an invalid Object lacking an UID, - // panic and print the stack trace. This should never happen. 
- if obj.GetUID() == "" { - panic("Attempt to cache invalid Object: missing UID") - } - - co, err := newCacheObject(i.storage, obj, apiType) - if err != nil { - return err - } - - gvk := co.object.GetObjectKind().GroupVersionKind() - - if _, ok := i.objects[gvk]; !ok { - i.objects[gvk] = make(map[runtime.UID]*cacheObject) - } - - log.Tracef("index: storing %s object with UID %q, meta: %t", gvk.Kind, obj.GetName(), apiType) - i.objects[gvk][co.object.GetUID()] = co - - return nil -} - -func (i *index) store(obj runtime.Object) error { - return store(i, obj, false) -} - -func (i *index) storeAll(objs []runtime.Object) (err error) { - for _, obj := range objs { - if err = i.store(obj); err != nil { - break - } - } - - return -} - -func (i *index) storeMeta(obj runtime.Object) error { - return store(i, obj, true) -} - -func (i *index) storeAllMeta(objs []runtime.Object) (err error) { - for _, obj := range objs { - if uids, ok := i.objects[obj.GetObjectKind().GroupVersionKind()]; ok { - if _, ok := uids[obj.GetUID()]; ok { - continue - } - } - - if err = i.storeMeta(obj); err != nil { - break - } - } - - return -} - -func (i *index) delete(gvk schema.GroupVersionKind, uid runtime.UID) { - if uids, ok := i.objects[gvk]; ok { - delete(uids, uid) - } -} - -func (i *index) count(gvk schema.GroupVersionKind) (count uint64) { - count = uint64(len(i.objects[gvk])) - log.Tracef("index: counted %d %s object(s)", count, gvk.Kind) - return -} - -func list(i *index, gvk schema.GroupVersionKind, apiTypes bool) ([]runtime.Object, error) { - uids := i.objects[gvk] - list := make([]runtime.Object, 0, len(uids)) - - log.Tracef("index: listing %s objects, meta: %t", gvk, apiTypes) - for _, obj := range uids { - loadFunc := obj.loadFull - if apiTypes { - loadFunc = obj.loadAPI - } - - if result, err := loadFunc(); err != nil { - return nil, err - } else { - list = append(list, result) - } - } - - return list, nil -} - -func (i *index) list(gvk schema.GroupVersionKind) ([]runtime.Object, 
error) { - return list(i, gvk, false) -} - -func (i *index) listMeta(gvk schema.GroupVersionKind) ([]runtime.Object, error) { - return list(i, gvk, true) -} -*/ diff --git a/pkg/storage/cache/object.go b/pkg/storage/cache/object.go deleted file mode 100644 index c0e807cf..00000000 --- a/pkg/storage/cache/object.go +++ /dev/null @@ -1,96 +0,0 @@ -package cache - -/* - -TODO: Revisit if we need this file/package in the future. - -import ( - log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/storage" -) - -type cacheObject struct { - storage storage.Storage - object runtime.Object - checksum string - apiType bool -} - -func newCacheObject(s storage.Storage, object runtime.Object, apiType bool) (c *cacheObject, err error) { - c = &cacheObject{ - storage: s, - object: object, - apiType: apiType, - } - - if c.checksum, err = s.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil { - c = nil - } - - return -} - -// loadFull returns the full Object, loading it only if it hasn't been cached before or the checksum has changed -func (c *cacheObject) loadFull() (runtime.Object, error) { - var checksum string - reload := c.apiType - - if !reload { - if chk, err := c.storage.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil { - return nil, err - } else if chk != c.checksum { - log.Tracef("cacheObject: %q invalidated, checksum mismatch: %q -> %q", c.object.GetName(), c.checksum, chk) - checksum = chk - reload = true - } else { - log.Tracef("cacheObject: %q checksum: %q", c.object.GetName(), c.checksum) - } - } - - if reload { - log.Tracef("cacheObject: full load triggered for %q", c.object.GetName()) - obj, err := c.storage.Get(c.object.GroupVersionKind(), c.object.GetUID()) - if err != nil { - return nil, err - } - - // Only apply the change after a successful Get - c.object = obj - c.apiType = false - - if len(checksum) > 0 { - c.checksum = checksum - } - } - - return 
c.object, nil -} - -// loadAPI returns the APIType of the Object, loading it only if the checksum has changed -func (c *cacheObject) loadAPI() (runtime.Object, error) { - if chk, err := c.storage.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil { - return nil, err - } else if chk != c.checksum { - log.Tracef("cacheObject: %q invalidated, checksum mismatch: %q -> %q", c.object.GetName(), c.checksum, chk) - log.Tracef("cacheObject: API load triggered for %q", c.object.GetName()) - obj, err := c.storage.GetMeta(c.object.GroupVersionKind(), c.object.GetUID()) - if err != nil { - return nil, err - } - - // Only apply the change after a successful GetMeta - c.object = obj - c.checksum = chk - c.apiType = true - } else { - log.Tracef("cacheObject: %q checksum: %q", c.object.GetName(), c.checksum) - } - - if c.apiType { - return c.object, nil - } - - return runtime.PartialObjectFrom(c.object), nil -} -*/ From 0afeff23c844d86cbcee84736ab097a09495fe4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:36:12 +0200 Subject: [PATCH 081/149] Remove the old client code, not needed anymore. 
--- pkg/client/client_dynamic.go | 97 ---------------- pkg/client/client_resource_template.go | 152 ------------------------- 2 files changed, 249 deletions(-) delete mode 100644 pkg/client/client_dynamic.go delete mode 100644 pkg/client/client_resource_template.go diff --git a/pkg/client/client_dynamic.go b/pkg/client/client_dynamic.go deleted file mode 100644 index 5f3ac2a4..00000000 --- a/pkg/client/client_dynamic.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -package client - -import ( - "fmt" - - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/storage" - "github.com/weaveworks/libgitops/pkg/storage/filterer" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// DynamicClient is an interface for accessing API types generically -type DynamicClient interface { - // New returns a new Object of its kind - New() runtime.Object - // Get returns an Object matching the UID from the storage - Get(runtime.UID) (runtime.Object, error) - // Set saves an Object into the persistent storage - Set(runtime.Object) error - // Patch performs a strategic merge patch on the object with - // the given UID, using the byte-encoded patch given - Patch(runtime.UID, []byte) error - // Find returns an Object based on the given filter, filters can - // match e.g. the Object's Name, UID or a specific property - Find(filter filterer.BaseFilter) (runtime.Object, error) - // FindAll returns multiple Objects based on the given filter, filters can - // match e.g. 
the Object's Name, UID or a specific property - FindAll(filter filterer.BaseFilter) ([]runtime.Object, error) - // Delete deletes an Object from the storage - Delete(uid runtime.UID) error - // List returns a list of all Objects available - List() ([]runtime.Object, error) -} - -// dynamicClient is a struct implementing the DynamicClient interface -// It uses a shared storage instance passed from the Client together with its own Filterer -type dynamicClient struct { - storage storage.Storage - gvk schema.GroupVersionKind - filterer *filterer.Filterer -} - -// NewDynamicClient builds the dynamicClient struct using the storage implementation and a new Filterer -func NewDynamicClient(s storage.Storage, gvk schema.GroupVersionKind) DynamicClient { - return &dynamicClient{ - storage: s, - gvk: gvk, - filterer: filterer.NewFilterer(s), - } -} - -// New returns a new Object of its kind -func (c *dynamicClient) New() runtime.Object { - obj, err := c.storage.New(c.gvk) - if err != nil { - panic(fmt.Sprintf("Client.New must not return an error: %v", err)) - } - return obj -} - -// Get returns an Object based the given UID -func (c *dynamicClient) Get(uid runtime.UID) (runtime.Object, error) { - return c.storage.Get(c.gvk, uid) -} - -// Set saves an Object into the persistent storage -func (c *dynamicClient) Set(resource runtime.Object) error { - return c.storage.Set(c.gvk, resource) -} - -// Patch performs a strategic merge patch on the object with -// the given UID, using the byte-encoded patch given -func (c *dynamicClient) Patch(uid runtime.UID, patch []byte) error { - return c.storage.Patch(c.gvk, uid, patch) -} - -// Find returns an Object based on a given Filter -func (c *dynamicClient) Find(filter filterer.BaseFilter) (runtime.Object, error) { - return c.filterer.Find(c.gvk, filter) -} - -// FindAll returns multiple Objects based on a given Filter -func (c *dynamicClient) FindAll(filter filterer.BaseFilter) ([]runtime.Object, error) { - return 
c.filterer.FindAll(c.gvk, filter) -} - -// Delete deletes the Object from the storage -func (c *dynamicClient) Delete(uid runtime.UID) error { - return c.storage.Delete(c.gvk, uid) -} - -// List returns a list of all Objects available -func (c *dynamicClient) List() ([]runtime.Object, error) { - return c.storage.List(c.gvk) -} diff --git a/pkg/client/client_resource_template.go b/pkg/client/client_resource_template.go deleted file mode 100644 index 53bc8741..00000000 --- a/pkg/client/client_resource_template.go +++ /dev/null @@ -1,152 +0,0 @@ -// +build ignore - -/* - Note: This file is autogenerated! Do not edit it manually! - Edit client_resource_template.go instead, and run - hack/generate-client.sh afterwards. -*/ - -package client - -import ( - "fmt" - - api "API_DIR" - - log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/storage" - "github.com/weaveworks/libgitops/pkg/storage/filterer" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// ResourceClient is an interface for accessing Resource-specific API objects -type ResourceClient interface { - // New returns a new Resource - New() *api.Resource - // Get returns the Resource matching given UID from the storage - Get(runtime.UID) (*api.Resource, error) - // Set saves the given Resource into persistent storage - Set(*api.Resource) error - // Patch performs a strategic merge patch on the object with - // the given UID, using the byte-encoded patch given - Patch(runtime.UID, []byte) error - // Find returns the Resource matching the given filter, filters can - // match e.g. the Object's Name, UID or a specific property - Find(filter filterer.BaseFilter) (*api.Resource, error) - // FindAll returns multiple Resources matching the given filter, filters can - // match e.g. 
the Object's Name, UID or a specific property - FindAll(filter filterer.BaseFilter) ([]*api.Resource, error) - // Delete deletes the Resource with the given UID from the storage - Delete(uid runtime.UID) error - // List returns a list of all Resources available - List() ([]*api.Resource, error) -} - -// Resources returns the ResourceClient for the Client object -func (c *Client) Resources() ResourceClient { - if c.resourceClient == nil { - c.resourceClient = newResourceClient(c.storage, c.gv) - } - - return c.resourceClient -} - -// resourceClient is a struct implementing the ResourceClient interface -// It uses a shared storage instance passed from the Client together with its own Filterer -type resourceClient struct { - storage storage.Storage - filterer *filterer.Filterer - gvk schema.GroupVersionKind -} - -// newResourceClient builds the resourceClient struct using the storage implementation and a new Filterer -func newResourceClient(s storage.Storage, gv schema.GroupVersion) ResourceClient { - return &resourceClient{ - storage: s, - filterer: filterer.NewFilterer(s), - gvk: gv.WithKind(api.KindResource.Title()), - } -} - -// New returns a new Object of its kind -func (c *resourceClient) New() *api.Resource { - log.Tracef("Client.New; GVK: %v", c.gvk) - obj, err := c.storage.New(c.gvk) - if err != nil { - panic(fmt.Sprintf("Client.New must not return an error: %v", err)) - } - return obj.(*api.Resource) -} - -// Find returns a single Resource based on the given Filter -func (c *resourceClient) Find(filter filterer.BaseFilter) (*api.Resource, error) { - log.Tracef("Client.Find; GVK: %v", c.gvk) - object, err := c.filterer.Find(c.gvk, filter) - if err != nil { - return nil, err - } - - return object.(*api.Resource), nil -} - -// FindAll returns multiple Resources based on the given Filter -func (c *resourceClient) FindAll(filter filterer.BaseFilter) ([]*api.Resource, error) { - log.Tracef("Client.FindAll; GVK: %v", c.gvk) - matches, err := 
c.filterer.FindAll(c.gvk, filter) - if err != nil { - return nil, err - } - - results := make([]*api.Resource, 0, len(matches)) - for _, item := range matches { - results = append(results, item.(*api.Resource)) - } - - return results, nil -} - -// Get returns the Resource matching given UID from the storage -func (c *resourceClient) Get(uid runtime.UID) (*api.Resource, error) { - log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk) - object, err := c.storage.Get(c.gvk, uid) - if err != nil { - return nil, err - } - - return object.(*api.Resource), nil -} - -// Set saves the given Resource into the persistent storage -func (c *resourceClient) Set(resource *api.Resource) error { - log.Tracef("Client.Set; UID: %q, GVK: %v", resource.GetUID(), c.gvk) - return c.storage.Set(c.gvk, resource) -} - -// Patch performs a strategic merge patch on the object with -// the given UID, using the byte-encoded patch given -func (c *resourceClient) Patch(uid runtime.UID, patch []byte) error { - return c.storage.Patch(c.gvk, uid, patch) -} - -// Delete deletes the Resource from the storage -func (c *resourceClient) Delete(uid runtime.UID) error { - log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk) - return c.storage.Delete(c.gvk, uid) -} - -// List returns a list of all Resources available -func (c *resourceClient) List() ([]*api.Resource, error) { - log.Tracef("Client.List; GVK: %v", c.gvk) - list, err := c.storage.List(c.gvk) - if err != nil { - return nil, err - } - - results := make([]*api.Resource, 0, len(list)) - for _, item := range list { - results = append(results, item.(*api.Resource)) - } - - return results, nil -} From 6a9d9bba1b96891c931d6f4468b7d9523e99ef84 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:36:41 +0200 Subject: [PATCH 082/149] Remove the old SyncStorage; not needed anymore. 
--- pkg/storage/sync/storage.go | 188 ------------------------------------ 1 file changed, 188 deletions(-) delete mode 100644 pkg/storage/sync/storage.go diff --git a/pkg/storage/sync/storage.go b/pkg/storage/sync/storage.go deleted file mode 100644 index 458f7fa2..00000000 --- a/pkg/storage/sync/storage.go +++ /dev/null @@ -1,188 +0,0 @@ -package sync - -/* - -TODO: Revisit if we need this file/package in the future. - -import ( - "fmt" - - log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/storage" - "github.com/weaveworks/libgitops/pkg/storage/watch" - "github.com/weaveworks/libgitops/pkg/storage/watch/update" - "github.com/weaveworks/libgitops/pkg/util/sync" -) - -const updateBuffer = 4096 // How many updates to buffer, 4096 should be enough for even a high update frequency - -// SyncStorage is a Storage implementation taking in multiple Storages and -// keeping them in sync. Any write operation executed on the SyncStorage -// is propagated to all of the Storages it manages (including the embedded -// one). For any retrieval or generation operation, the embedded Storage -// will be used (it is treated as read-write). As all other Storages only -// receive write operations, they can be thought of as write-only. -type SyncStorage struct { - storage.Storage - storages []storage.Storage - inboundStream update.UpdateStream - outboundStream update.UpdateStream - monitor *sync.Monitor -} - -// SyncStorage implements update.EventStorage. 
-var _ update.EventStorage = &SyncStorage{} - -// NewSyncStorage constructs a new SyncStorage -func NewSyncStorage(rwStorage storage.Storage, wStorages ...storage.Storage) storage.Storage { - ss := &SyncStorage{ - Storage: rwStorage, - storages: append(wStorages, rwStorage), - } - - for _, s := range ss.storages { - if watchStorage, ok := s.(watch.WatchStorage); ok { - // Populate eventStream if we found a watchstorage - if ss.inboundStream == nil { - ss.inboundStream = make(update.UpdateStream, updateBuffer) - } - watchStorage.SetUpdateStream(ss.inboundStream) - } - } - - if ss.inboundStream != nil { - ss.monitor = sync.RunMonitor(ss.monitorFunc) - ss.outboundStream = make(update.UpdateStream, updateBuffer) - } - - return ss -} - -// Set is propagated to all Storages -func (ss *SyncStorage) Set(obj runtime.Object) error { - return ss.runAll(func(s storage.Storage) error { - return s.Set(obj) - }) -} - -// Patch is propagated to all Storages -func (ss *SyncStorage) Patch(key storage.ObjectKey, patch []byte) error { - return ss.runAll(func(s storage.Storage) error { - return s.Patch(key, patch) - }) -} - -// Delete is propagated to all Storages -func (ss *SyncStorage) Delete(key storage.ObjectKey) error { - return ss.runAll(func(s storage.Storage) error { - return s.Delete(key) - }) -} - -func (ss *SyncStorage) Close() error { - // Close all WatchStorages - for _, s := range ss.storages { - if watchStorage, ok := s.(watch.WatchStorage); ok { - _ = watchStorage.Close() - } - } - - // Close the event streams if set - if ss.inboundStream != nil { - close(ss.inboundStream) - } - if ss.outboundStream != nil { - close(ss.outboundStream) - } - // Wait for the monitor goroutine - ss.monitor.Wait() - return nil -} - -func (ss *SyncStorage) GetUpdateStream() update.UpdateStream { - return ss.outboundStream -} - -// runAll runs the given function for all Storages in parallel and aggregates all errors -func (ss *SyncStorage) runAll(f func(storage.Storage) error) (err error) { - 
type result struct { - int - error - } - - errC := make(chan result) - for i, s := range ss.storages { - go func(i int, s storage.Storage) { - errC <- result{i, f(s)} - }(i, s) // NOTE: This requires i and s as arguments, otherwise they will be evaluated for one Storage only - } - - for i := 0; i < len(ss.storages); i++ { - if result := <-errC; result.error != nil { - if err == nil { - err = fmt.Errorf("SyncStorage: Error in Storage %d: %v", result.int, result.error) - } else { - err = fmt.Errorf("%v\n%29s %d: %v", err, "and error in Storage", result.int, result.error) - } - } - } - - return -} - -func (ss *SyncStorage) monitorFunc() { - log.Debug("SyncStorage: Monitoring thread started") - defer log.Debug("SyncStorage: Monitoring thread stopped") - - // TODO: Support detecting changes done when the GitOps daemon isn't running - // This is difficult to do though, as we have don't know which state is the latest - // For now, only update the state on write when the daemon is running - for { - upd, ok := <-ss.inboundStream - if ok { - log.Debugf("SyncStorage: Received update %v %t", upd, ok) - - gvk := upd.PartialObject.GetObjectKind().GroupVersionKind() - uid := upd.PartialObject.GetUID() - key := storage.NewObjectKey(storage.NewKindKey(gvk), runtime.NewIdentifier(string(uid))) - log.Debugf("SyncStorage: Object has gvk=%q and uid=%q", gvk, uid) - - switch upd.Event { - case update.ObjectEventModify, update.ObjectEventCreate: - // First load the Object using the Storage given in the update, - // then set it using the client constructed above - - obj, err := upd.Storage.Get(key) - if err != nil { - log.Errorf("Failed to get Object with UID %q: %v", upd.PartialObject.GetUID(), err) - continue - } - - if err = ss.Set(obj); err != nil { - log.Errorf("Failed to set Object with UID %q: %v", upd.PartialObject.GetUID(), err) - continue - } - case update.ObjectEventDelete: - // For deletion we use the generated "fake" APIType object - if err := ss.Delete(key); err != nil { - 
log.Errorf("Failed to delete Object with UID %q: %v", upd.PartialObject.GetUID(), err) - continue - } - } - - // Send the update to the listeners unless the channel is full, - // in which case issue a warning. The channel can hold as many - // updates as updateBuffer specifies. - select { - case ss.outboundStream <- upd: - log.Debugf("SyncStorage: Sent update: %v", upd) - default: - log.Warn("SyncStorage: Failed to send update, channel full") - } - } else { - return - } - } -} -*/ From 3e0583596e7098d00e2a385b3fddddbaf25e47fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:40:00 +0200 Subject: [PATCH 083/149] Move pkg/logs to cmd/ as it is only sample code, not something that should be used in the library itself. --- {pkg => cmd/common}/logs/flag/flag.go | 3 +++ {pkg => cmd/common}/logs/logs.go | 2 ++ 2 files changed, 5 insertions(+) rename {pkg => cmd/common}/logs/flag/flag.go (84%) rename {pkg => cmd/common}/logs/logs.go (95%) diff --git a/pkg/logs/flag/flag.go b/cmd/common/logs/flag/flag.go similarity index 84% rename from pkg/logs/flag/flag.go rename to cmd/common/logs/flag/flag.go index 3c226cfe..83f59678 100644 --- a/pkg/logs/flag/flag.go +++ b/cmd/common/logs/flag/flag.go @@ -5,6 +5,9 @@ import ( "github.com/spf13/pflag" ) +// TODO: Use these flags in the sample binaries? +// TODO: Move to the way controller-runtime does logs instead? + type LogLevelFlag struct { value *logrus.Level } diff --git a/pkg/logs/logs.go b/cmd/common/logs/logs.go similarity index 95% rename from pkg/logs/logs.go rename to cmd/common/logs/logs.go index 1ca78f1b..c5b11a85 100644 --- a/pkg/logs/logs.go +++ b/cmd/common/logs/logs.go @@ -8,6 +8,8 @@ import ( log "github.com/sirupsen/logrus" ) +// TODO: Move to the way controller-runtime does logs instead? 
+ // Quiet specifies whether to only print machine-readable IDs var Quiet bool From a6f69202aeb12fc9917b9bf67e42a06f0c332d21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:41:26 +0200 Subject: [PATCH 084/149] Remove the last pieces of the old runtime code. --- pkg/runtime/doc.go | 2 - pkg/runtime/identifiers.go | 63 -------------------------- pkg/runtime/meta.go | 52 --------------------- pkg/runtime/zz_generated.deepcopy.go | 67 ---------------------------- 4 files changed, 184 deletions(-) delete mode 100644 pkg/runtime/doc.go delete mode 100644 pkg/runtime/identifiers.go delete mode 100644 pkg/runtime/meta.go delete mode 100644 pkg/runtime/zz_generated.deepcopy.go diff --git a/pkg/runtime/doc.go b/pkg/runtime/doc.go deleted file mode 100644 index 4eb2a1ee..00000000 --- a/pkg/runtime/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// +k8s:deepcopy-gen=package -package runtime diff --git a/pkg/runtime/identifiers.go b/pkg/runtime/identifiers.go deleted file mode 100644 index 87bc00e2..00000000 --- a/pkg/runtime/identifiers.go +++ /dev/null @@ -1,63 +0,0 @@ -package runtime - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// DefaultNamespace describes the default namespace name used for the system. -const DefaultNamespace = "default" - -// Identifyable is an object which can be identified -type Identifyable interface { - // GetIdentifier can return e.g. 
a "namespace/name" combination, which is not guaranteed - // to be unique world-wide, or alternatively a random SHA for instance - GetIdentifier() string -} - -type identifier string - -func (i identifier) GetIdentifier() string { return string(i) } - -type Metav1NameIdentifierFactory struct{} - -func (id Metav1NameIdentifierFactory) Identify(o interface{}) (Identifyable, bool) { - switch obj := o.(type) { - case metav1.Object: - if len(obj.GetNamespace()) == 0 || len(obj.GetName()) == 0 { - return nil, false - } - return NewIdentifier(fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName())), true - } - return nil, false -} - -type ObjectUIDIdentifierFactory struct{} - -func (id ObjectUIDIdentifierFactory) Identify(o interface{}) (Identifyable, bool) { - switch obj := o.(type) { - case Object: - if len(obj.GetUID()) == 0 { - return nil, false - } - // TODO: Make sure that runtime.APIType works with this - return NewIdentifier(string(obj.GetUID())), true - } - return nil, false -} - -var ( - // Metav1Identifier identifies an object using its metav1.ObjectMeta Name and Namespace - Metav1NameIdentifier IdentifierFactory = Metav1NameIdentifierFactory{} - // ObjectUIDIdentifier identifies an object using its libgitops/pkg/runtime.ObjectMeta UID field - ObjectUIDIdentifier IdentifierFactory = ObjectUIDIdentifierFactory{} -) - -func NewIdentifier(str string) Identifyable { - return identifier(str) -} - -type IdentifierFactory interface { - Identify(o interface{}) (id Identifyable, ok bool) -} diff --git a/pkg/runtime/meta.go b/pkg/runtime/meta.go deleted file mode 100644 index 32930e18..00000000 --- a/pkg/runtime/meta.go +++ /dev/null @@ -1,52 +0,0 @@ -package runtime - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/yaml" -) - -// PartialObjectImpl is a struct implementing PartialObject, used for -// unmarshalling unknown objects into this intermediate type -// where .Name, .UID, .Kind and .APIVersion become 
easily available -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectImpl struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata"` -} - -func (po *PartialObjectImpl) IsPartialObject() {} - -// This constructor ensures the PartialObjectImpl fields are not nil. -// TODO: Make this multi-document-aware? -func NewPartialObject(frame []byte) (PartialObject, error) { - obj := &PartialObjectImpl{} - - // The yaml package supports both YAML and JSON. Don't use the serializer, as the APIType - // wrapper is not registered in any scheme. - if err := yaml.Unmarshal(frame, obj); err != nil { - return nil, err - } - - return obj, nil -} - -var _ Object = &PartialObjectImpl{} -var _ PartialObject = &PartialObjectImpl{} - -// Object is an union of the Object interfaces that are accessible for a -// type that embeds both metav1.TypeMeta and metav1.ObjectMeta. -type Object interface { - runtime.Object - metav1.ObjectMetaAccessor - metav1.Object -} - -// PartialObject is a partially-decoded object, where only metadata has been loaded. -type PartialObject interface { - Object - - // IsPartialObject is a dummy function for signalling that this is a partially-loaded object - // i.e. only TypeMeta and ObjectMeta are stored in memory. - IsPartialObject() -} diff --git a/pkg/runtime/zz_generated.deepcopy.go b/pkg/runtime/zz_generated.deepcopy.go deleted file mode 100644 index 20beb72f..00000000 --- a/pkg/runtime/zz_generated.deepcopy.go +++ /dev/null @@ -1,67 +0,0 @@ -// +build !ignore_autogenerated - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package runtime - -import ( - pkgruntime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Metav1NameIdentifierFactory) DeepCopyInto(out *Metav1NameIdentifierFactory) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metav1NameIdentifierFactory. -func (in *Metav1NameIdentifierFactory) DeepCopy() *Metav1NameIdentifierFactory { - if in == nil { - return nil - } - out := new(Metav1NameIdentifierFactory) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectUIDIdentifierFactory) DeepCopyInto(out *ObjectUIDIdentifierFactory) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectUIDIdentifierFactory. -func (in *ObjectUIDIdentifierFactory) DeepCopy() *ObjectUIDIdentifierFactory { - if in == nil { - return nil - } - out := new(ObjectUIDIdentifierFactory) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PartialObjectImpl) DeepCopyInto(out *PartialObjectImpl) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectImpl. -func (in *PartialObjectImpl) DeepCopy() *PartialObjectImpl { - if in == nil { - return nil - } - out := new(PartialObjectImpl) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new pkgruntime.Object. -func (in *PartialObjectImpl) DeepCopyObject() pkgruntime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} From f250867c7ea6e27ae0d0d48f46e13e0c03e28be5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:41:45 +0200 Subject: [PATCH 085/149] Update the sample applications. 
--- cmd/common/common.go | 20 ++--- cmd/sample-app/main.go | 168 +++++++++++++++++++++++++++++++++---- cmd/sample-gitops/main.go | 169 +++++++++++++++++++++++++++----------- cmd/sample-watch/main.go | 56 ++++++++++--- 4 files changed, 328 insertions(+), 85 deletions(-) diff --git a/cmd/common/common.go b/cmd/common/common.go index dcba7c68..f011dace 100644 --- a/cmd/common/common.go +++ b/cmd/common/common.go @@ -13,8 +13,8 @@ import ( "github.com/spf13/pflag" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" "github.com/weaveworks/libgitops/cmd/sample-app/version" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/storage" + "github.com/weaveworks/libgitops/pkg/storage/client" + "github.com/weaveworks/libgitops/pkg/storage/core" ) var ( @@ -25,10 +25,6 @@ func init() { rand.Seed(time.Now().UnixNano()) } -func CarKeyForName(name string) storage.ObjectKey { - return storage.NewObjectKey(storage.NewKindKey(CarGVK), runtime.NewIdentifier("default/"+name)) -} - func NewCar(name string) *v1alpha1.Car { obj := &v1alpha1.Car{} obj.Name = name @@ -38,17 +34,17 @@ func NewCar(name string) *v1alpha1.Car { return obj } -func SetNewCarStatus(s storage.Storage, key storage.ObjectKey) error { - obj, err := s.Get(key) +func SetNewCarStatus(ctx context.Context, c client.Client, name string) error { + car := &v1alpha1.Car{} + err := c.Get(ctx, core.ObjectKey{Name: name}, car) if err != nil { return err } - car := obj.(*v1alpha1.Car) car.Status.Distance = rand.Uint64() car.Status.Speed = rand.Float64() * 100 - return s.Update(car) + return c.Update(ctx, car) } func ParseVersionFlag() { @@ -75,8 +71,8 @@ func NewEcho() *echo.Echo { func StartEcho(e *echo.Echo) error { // Start the server go func() { - if err := e.Start(":8888"); err != nil { - e.Logger.Info("shutting down the server") + if err := e.Start(":8881"); err != nil { + e.Logger.Info("shutting down the server", err) } }() diff --git a/cmd/sample-app/main.go 
b/cmd/sample-app/main.go index ea119a95..2812acc3 100644 --- a/cmd/sample-app/main.go +++ b/cmd/sample-app/main.go @@ -2,7 +2,10 @@ package main import ( "bytes" + "context" + "encoding/json" "fmt" + "io/ioutil" "net/http" "os" @@ -10,12 +13,19 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/pflag" "github.com/weaveworks/libgitops/cmd/common" + "github.com/weaveworks/libgitops/cmd/common/logs" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" - "github.com/weaveworks/libgitops/pkg/logs" - "github.com/weaveworks/libgitops/pkg/runtime" "github.com/weaveworks/libgitops/pkg/serializer" - "github.com/weaveworks/libgitops/pkg/storage" + "github.com/weaveworks/libgitops/pkg/storage/backend" + "github.com/weaveworks/libgitops/pkg/storage/client" + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" + "github.com/weaveworks/libgitops/pkg/storage/kube" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" ) var manifestDirFlag = pflag.String("data-dir", "/tmp/libgitops/manifest", "Where to store the YAML files") @@ -25,27 +35,43 @@ func main() { common.ParseVersionFlag() // Run the application - if err := run(); err != nil { + if err := run(*manifestDirFlag); err != nil { fmt.Println(err) os.Exit(1) } } -func run() error { +func run(manifestDir string) error { + ctx := context.Background() // Create the manifest directory - if err := os.MkdirAll(*manifestDirFlag, 0755); err != nil { + if err := os.MkdirAll(manifestDir, 0755); err != nil { return err } // Set the log level logs.Logger.SetLevel(logrus.InfoLevel) - plainStorage := storage.NewGenericStorage( - storage.NewGenericRawStorage(*manifestDirFlag, v1alpha1.SchemeGroupVersion, serializer.ContentTypeYAML), - scheme.Serializer, - 
[]runtime.IdentifierFactory{runtime.Metav1NameIdentifier}, + s, err := filesystem.NewSimpleStorage( + manifestDir, + core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, + filesystem.SimpleFileFinderOptions{ + DisableGroupDirectory: true, + ContentType: serializer.ContentTypeYAML, + }, ) - defer func() { _ = plainStorage.Close() }() + if err != nil { + return err + } + + b, err := backend.NewGeneric(s, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil) + if err != nil { + return err + } + + plainClient, err := client.NewGeneric(b, scheme.Serializer.Patcher()) + if err != nil { + return err + } e := common.NewEcho() @@ -55,7 +81,8 @@ func run() error { return echo.NewHTTPError(http.StatusBadRequest, "Please set name") } - obj, err := plainStorage.Get(common.CarKeyForName(name)) + obj := &v1alpha1.Car{} + err := plainClient.Get(ctx, core.ObjectKey{Name: name}, obj) if err != nil { return err } @@ -66,13 +93,92 @@ func run() error { return c.JSONBlob(http.StatusOK, content.Bytes()) }) + e.GET("/meta/", func(c echo.Context) error { + list := &metav1.PartialObjectMetadataList{} + list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList")) + err := plainClient.List(ctx, list) + if err != nil { + return err + } + var content bytes.Buffer + if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil { + return err + } + return c.JSONBlob(http.StatusOK, content.Bytes()) + }) + + e.GET("/meta/:name", func(c echo.Context) error { + name := c.Param("name") + if len(name) == 0 { + return echo.NewHTTPError(http.StatusBadRequest, "Please set name") + } + + obj := &metav1.PartialObjectMetadata{} + obj.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("Car")) + err := plainClient.Get(ctx, core.ObjectKey{ + Name: name, + }, obj) + if err != nil { + return err + } + var content bytes.Buffer + if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), obj); err != nil { + return err 
+ } + return c.JSONBlob(http.StatusOK, content.Bytes()) + }) + + e.GET("/unstructured/", func(c echo.Context) error { + list := &unstructured.UnstructuredList{} + list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList")) + err := plainClient.List(ctx, list) + if err != nil { + return err + } + var content bytes.Buffer + if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil { + return err + } + var newcontent bytes.Buffer + if err := json.Indent(&newcontent, content.Bytes(), "", " "); err != nil { + return err + } + return c.JSONBlob(http.StatusOK, newcontent.Bytes()) + }) + + e.GET("/unstructured/:name", func(c echo.Context) error { + name := c.Param("name") + if len(name) == 0 { + return echo.NewHTTPError(http.StatusBadRequest, "Please set name") + } + + obj := &unstructured.Unstructured{} + obj.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("Car")) + err := plainClient.Get(ctx, core.ObjectKey{ + Name: name, + }, obj) + if err != nil { + return err + } + var content bytes.Buffer + // This does for some reason not pretty-encode the output + if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), obj); err != nil { + return err + } + var newcontent bytes.Buffer + if err := json.Indent(&newcontent, content.Bytes(), "", " "); err != nil { + return err + } + return c.JSONBlob(http.StatusOK, newcontent.Bytes()) + }) + e.POST("/plain/:name", func(c echo.Context) error { name := c.Param("name") if len(name) == 0 { return echo.NewHTTPError(http.StatusBadRequest, "Please set name") } - if err := plainStorage.Create(common.NewCar(name)); err != nil { + if err := plainClient.Create(ctx, common.NewCar(name)); err != nil { return err } return c.String(200, "OK!") @@ -84,11 +190,45 @@ func run() error { return echo.NewHTTPError(http.StatusBadRequest, "Please set name") } - if err := common.SetNewCarStatus(plainStorage, common.CarKeyForName(name)); err != nil { + if err := 
common.SetNewCarStatus(ctx, plainClient, name); err != nil { return err } return c.String(200, "OK!") }) + e.PATCH("/plain/:name", func(c echo.Context) error { + name := c.Param("name") + if len(name) == 0 { + return echo.NewHTTPError(http.StatusBadRequest, "Please set name") + } + + body, err := ioutil.ReadAll(c.Request().Body) + if err != nil { + return err + } + c.Request().Body.Close() + + car := &v1alpha1.Car{} + err = plainClient.Get(ctx, core.ObjectKey{ + Name: name, + }, car) + if err != nil { + return err + } + + if err := plainClient.Patch(ctx, car, ctrlclient.RawPatch(types.MergePatchType, body)); err != nil { + return err + } + + return c.JSON(200, car) + }) + return common.StartEcho(e) } + +/* +type noNamespacesRESTMapper struct{} + +func (noNamespacesRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return &meta.RESTMapping{Scope: meta.RESTScopeRoot}, nil +}*/ diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go index e8c21805..d18a9d55 100644 --- a/cmd/sample-gitops/main.go +++ b/cmd/sample-gitops/main.go @@ -1,9 +1,11 @@ package main import ( + "bytes" "context" "fmt" "io/ioutil" + "math/rand" "net/http" "os" "time" @@ -15,14 +17,22 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/pflag" "github.com/weaveworks/libgitops/cmd/common" + "github.com/weaveworks/libgitops/cmd/common/logs" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" - "github.com/weaveworks/libgitops/pkg/gitdir" - "github.com/weaveworks/libgitops/pkg/logs" - "github.com/weaveworks/libgitops/pkg/storage" - "github.com/weaveworks/libgitops/pkg/storage/transaction" - githubpr "github.com/weaveworks/libgitops/pkg/storage/transaction/pullrequest/github" - "github.com/weaveworks/libgitops/pkg/storage/watch" - "github.com/weaveworks/libgitops/pkg/storage/watch/update" + "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" + "github.com/weaveworks/libgitops/pkg/serializer" + 
"github.com/weaveworks/libgitops/pkg/storage/backend" + "github.com/weaveworks/libgitops/pkg/storage/client" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed/git" + githubpr "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed/git/github" + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/event" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" + unstructuredevent "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/event" + "github.com/weaveworks/libgitops/pkg/storage/kube" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) var ( @@ -30,8 +40,9 @@ var ( authorNameFlag = pflag.String("author-name", defaultAuthorName, "Author name for Git commits") authorEmailFlag = pflag.String("author-email", defaultAuthorEmail, "Author email for Git commits") gitURLFlag = pflag.String("git-url", "", "HTTPS Git URL; where the Git repository is, e.g. https://github.com/luxas/ignite-gitops") - prAssigneeFlag = pflag.StringSlice("pr-assignees", nil, "What user logins to assign for the created PR. The user must have pull access to the repo.") prMilestoneFlag = pflag.String("pr-milestone", "", "What milestone to tag the PR with") + prAssigneesFlag = pflag.StringSlice("pr-assignees", nil, "What user logins to assign for the created PR. The user must have pull access to the repo.") + prLabelsFlag = pflag.StringSlice("pr-labels", nil, "What labels to apply on the created PR. The labels must already exist. E.g. 
\"user/bot,actuator/libgitops,kind/status-update\"") ) const ( @@ -46,7 +57,16 @@ func main() { common.ParseVersionFlag() // Run the application - if err := run(*identityFlag, *gitURLFlag, os.Getenv("GITHUB_TOKEN"), *authorNameFlag, *authorEmailFlag); err != nil { + if err := run( + *identityFlag, + *gitURLFlag, + os.Getenv("GITHUB_TOKEN"), + *authorNameFlag, + *authorEmailFlag, + *prMilestoneFlag, + *prAssigneesFlag, + *prLabelsFlag, + ); err != nil { fmt.Println(err) os.Exit(1) } @@ -60,7 +80,8 @@ func expandAndRead(filePath string) ([]byte, error) { return ioutil.ReadFile(expandedPath) } -func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error { +func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone string, + prAssignees, prLabels []string) error { // Validate parameters if len(identityFile) == 0 { return fmt.Errorf("--identity-file is required") @@ -69,7 +90,7 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error { return fmt.Errorf("--git-url is required") } if len(ghToken) == 0 { - return fmt.Errorf("--github-token is required") + return fmt.Errorf("GITHUB_TOKEN is required") } if len(authorName) == 0 { return fmt.Errorf("--author-name is required") @@ -78,6 +99,9 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error { return fmt.Errorf("--author-email is required") } + // Set the log level + logs.Logger.SetLevel(logrus.TraceLevel) + // Read the identity and known_hosts files identityContent, err := expandAndRead(identityFile) if err != nil { @@ -101,58 +125,101 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error { } // Authenticate to the GitDirectory using Git SSH - authMethod, err := gitdir.NewSSHAuthMethod(identityContent, knownHostsContent) + authMethod, err := git.NewSSHAuthMethod(identityContent, knownHostsContent) if err != nil { return err } - // Construct the GitDirectory implementation which backs the storage - gitDir, err 
:= gitdir.NewGitDirectory(repoRef, gitdir.GitDirectoryOptions{ + ctx, cancel := context.WithCancel(context.Background()) + + defer func() { cancel() }() + + // Construct the LocalClone implementation which backs the storage + localClone, err := git.NewLocalClone(ctx, repoRef, git.LocalCloneOptions{ Branch: "master", - Interval: 10 * time.Second, AuthMethod: authMethod, }) if err != nil { return err } - // Create a new PR provider for the GitStorage - prProvider, err := githubpr.NewGitHubPRProvider(ghClient) + rawManifest, err := unstructuredevent.NewManifest( + localClone.Dir(), + filesystem.DefaultContentTyper, + core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced + &core.SerializerObjectRecognizer{Serializer: scheme.Serializer}, + filesystem.DefaultPathExcluders(), + ) if err != nil { return err } - // Create a new GitStorage using the GitDirectory, PR provider, and Serializer - gitStorage, err := transaction.NewGitStorage(gitDir, prProvider, scheme.Serializer) + + // Create the channel to receive events to, and register it with the EventStorage + updates := make(event.ObjectEventStream, 4096) + if err := rawManifest.WatchForObjectEvents(ctx, updates); err != nil { + return err + } + + defer func() { _ = rawManifest.Close() }() + + b, err := backend.NewGeneric(rawManifest, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil) if err != nil { return err } - // Set the log level - logs.Logger.SetLevel(logrus.InfoLevel) + gitClient, err := client.NewGeneric(b, scheme.Serializer.Patcher()) + if err != nil { + return err + } - watchStorage, err := watch.NewManifestStorage(gitDir.Dir(), scheme.Serializer) + txGeneralClient, err := transactional.NewGeneric(gitClient, localClone, nil) if err != nil { return err } - defer func() { _ = watchStorage.Close() }() - updates := make(chan update.Update, 4096) - watchStorage.SetUpdateStream(updates) + txClient, err := distributed.NewClient(txGeneralClient, localClone) + if err != nil { + return 
err + } + + // Create a new CommitHook for sending PRs + prCommitHook, err := githubpr.NewGitHubPRCommitHandler(ghClient, localClone.RepositoryRef()) + if err != nil { + return err + } + + // Register the PR CommitHook with the BranchManager + // This needs to be done after the distributed.NewClient call, so + // it has been able to handle pushing of the branch first. + localClone.CommitHookChain().Register(prCommitHook) + + // Start the sync loop in the background + txClient.StartResyncLoop(ctx, 15*time.Second) go func() { for upd := range updates { - logrus.Infof("Got %s update for: %v %v", upd.Event, upd.PartialObject.GetObjectKind().GroupVersionKind(), upd.PartialObject.GetObjectMeta()) + logrus.Infof("Got %s update for: %v %v", upd.Type, upd.ID.GroupKind(), upd.ID.ObjectKey()) } }() e := common.NewEcho() e.GET("/git/", func(c echo.Context) error { - objs, err := gitStorage.List(storage.NewKindKey(common.CarGVK)) - if err != nil { + list := &unstructured.UnstructuredList{} + list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList")) + + /*if br := c.QueryParam("branch"); len(br) != 0 { + ctx = core.WithVersionRef(ctx, core.NewBranchRef(br)) + }*/ + + if err := txClient.List(ctx, list); err != nil { + return err + } + var content bytes.Buffer + if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil { return err } - return c.JSON(http.StatusOK, objs) + return c.JSONBlob(http.StatusOK, content.Bytes()) }) e.PUT("/git/:name", func(c echo.Context) error { @@ -161,26 +228,36 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error { return echo.NewHTTPError(http.StatusBadRequest, "Please set name") } - objKey := common.CarKeyForName(name) - err := gitStorage.Transaction(context.Background(), fmt.Sprintf("%s-update-", name), func(ctx context.Context, s storage.Storage) (transaction.CommitResult, error) { + car := v1alpha1.Car{} + carKey := core.ObjectKey{Name: name} - // Update the 
status of the car - if err := common.SetNewCarStatus(s, objKey); err != nil { - return nil, err - } + branchCtx := core.WithVersionRef(ctx, core.NewBranchRef(localClone.MainBranch())) - return &transaction.GenericPullRequestResult{ - CommitResult: &transaction.GenericCommitResult{ - AuthorName: authorName, - AuthorEmail: authorEmail, - Title: "Update Car speed", - Description: "We really need to sync this state!", + headBranch := fmt.Sprintf("%s-update-", name) + err := txClient. + BranchTransaction(branchCtx, headBranch). + Get(carKey, &car). + Custom(func(ctx context.Context) error { + car.Status.Distance = rand.Uint64() + car.Status.Speed = rand.Float64() * 100 + return nil + }). + Update(&car). + CreateTx(githubpr.GenericPullRequest{ + Commit: transactional.GenericCommit{ + Author: transactional.GenericCommitAuthor{ + Name: authorName, + Email: authorEmail, + }, + Message: transactional.GenericCommitMessage{ + Title: "Update Car speed", + Description: "We really need to sync this state!", + }, }, - Labels: []string{"user/bot", "actuator/libgitops", "kind/status-update"}, - Assignees: *prAssigneeFlag, - Milestone: *prMilestoneFlag, - }, nil - }) + Labels: prLabels, + Assignees: prAssignees, + Milestone: prMilestone, + }).Error() if err != nil { return err } diff --git a/cmd/sample-watch/main.go b/cmd/sample-watch/main.go index ef1aec0a..c81a279a 100644 --- a/cmd/sample-watch/main.go +++ b/cmd/sample-watch/main.go @@ -2,6 +2,7 @@ package main import ( "bytes" + "context" "fmt" "net/http" "os" @@ -10,11 +11,17 @@ import ( "github.com/sirupsen/logrus" "github.com/spf13/pflag" "github.com/weaveworks/libgitops/cmd/common" + "github.com/weaveworks/libgitops/cmd/common/logs" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" - "github.com/weaveworks/libgitops/pkg/logs" + "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" "github.com/weaveworks/libgitops/pkg/serializer" - "github.com/weaveworks/libgitops/pkg/storage/watch" - 
"github.com/weaveworks/libgitops/pkg/storage/watch/update" + "github.com/weaveworks/libgitops/pkg/storage/backend" + "github.com/weaveworks/libgitops/pkg/storage/client" + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/event" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" + unstructuredevent "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/event" + "github.com/weaveworks/libgitops/pkg/storage/kube" ) var watchDirFlag = pflag.String("watch-dir", "/tmp/libgitops/watch", "Where to watch for YAML/JSON manifests") @@ -24,33 +31,55 @@ func main() { common.ParseVersionFlag() // Run the application - if err := run(); err != nil { + if err := run(*watchDirFlag); err != nil { fmt.Println(err) os.Exit(1) } } -func run() error { +func run(watchDir string) error { // Create the watch directory if err := os.MkdirAll(*watchDirFlag, 0755); err != nil { return err } // Set the log level - logs.Logger.SetLevel(logrus.InfoLevel) + logs.Logger.SetLevel(logrus.TraceLevel) - watchStorage, err := watch.NewManifestStorage(*watchDirFlag, scheme.Serializer) + ctx := context.Background() + + rawManifest, err := unstructuredevent.NewManifest( + watchDir, + filesystem.DefaultContentTyper, + core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced + &core.SerializerObjectRecognizer{Serializer: scheme.Serializer}, + filesystem.DefaultPathExcluders(), + ) + if err != nil { + return err + } + + // Create the channel to receive events to, and register it with the EventStorage + updates := make(event.ObjectEventStream, 4096) + if err := rawManifest.WatchForObjectEvents(ctx, updates); err != nil { + return err + } + + b, err := backend.NewGeneric(rawManifest, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil) + if err != nil { + return err + } + + watchStorage, err := client.NewGeneric(b, scheme.Serializer.Patcher()) if err != nil { return err } - defer func() { _ = 
watchStorage.Close() }() - updates := make(chan update.Update, 4096) - watchStorage.SetUpdateStream(updates) + defer func() { _ = rawManifest.Close() }() go func() { for upd := range updates { - logrus.Infof("Got %s update for: %v %v", upd.Event, upd.PartialObject.GetObjectKind().GroupVersionKind(), upd.PartialObject.GetObjectMeta()) + logrus.Infof("Got %s update for: %v %v", upd.Type, upd.ID.GroupKind(), upd.ID.ObjectKey()) } }() @@ -62,7 +91,8 @@ func run() error { return echo.NewHTTPError(http.StatusBadRequest, "Please set name") } - obj, err := watchStorage.Get(common.CarKeyForName(name)) + obj := &v1alpha1.Car{} + err := watchStorage.Get(ctx, core.ObjectKey{Name: name}, obj) if err != nil { return err } @@ -79,7 +109,7 @@ func run() error { return echo.NewHTTPError(http.StatusBadRequest, "Please set name") } - if err := common.SetNewCarStatus(watchStorage, common.CarKeyForName(name)); err != nil { + if err := common.SetNewCarStatus(ctx, watchStorage, name); err != nil { return err } return c.String(200, "OK!") From ab466b47f016bbd4032a7f83b58b0e44d0427cd1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:42:10 +0200 Subject: [PATCH 086/149] Update go.mod --- go.mod | 31 ++-- go.sum | 552 ++++++++++++++++++++++++--------------------------------- 2 files changed, 250 insertions(+), 333 deletions(-) diff --git a/go.mod b/go.mod index c03013fb..936a5a0d 100644 --- a/go.mod +++ b/go.mod @@ -1,31 +1,30 @@ module github.com/weaveworks/libgitops -go 1.14 +go 1.15 -replace ( - github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible - github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.3.0 -) +replace github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible require ( - github.com/fluxcd/go-git-providers v0.0.2 - github.com/fluxcd/toolkit v0.0.1-beta.2 - github.com/go-git/go-git/v5 v5.1.0 - github.com/go-openapi/spec v0.19.8 + 
github.com/evanphx/json-patch v4.9.0+incompatible + github.com/fluxcd/go-git-providers v0.0.3 + github.com/fluxcd/pkg/ssh v0.0.5 + github.com/go-git/go-git/v5 v5.2.0 + github.com/go-openapi/spec v0.20.0 github.com/google/go-github/v32 v32.1.0 github.com/labstack/echo v3.3.10+incompatible github.com/labstack/gommon v0.3.0 // indirect github.com/mattn/go-isatty v0.0.12 // indirect github.com/mitchellh/go-homedir v1.1.0 github.com/rjeczalik/notify v0.9.2 - github.com/sirupsen/logrus v1.6.0 + github.com/sirupsen/logrus v1.7.0 + github.com/spf13/afero v1.2.2 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.6.1 - golang.org/x/net v0.0.0-20200625001655-4c5254603344 // indirect - golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d - k8s.io/apimachinery v0.18.6 - k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 - sigs.k8s.io/controller-runtime v0.6.0 - sigs.k8s.io/kustomize/kyaml v0.1.11 + golang.org/x/sys v0.0.0-20210108172913-0df2131ae363 + k8s.io/api v0.19.2 + k8s.io/apimachinery v0.19.6 + k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 + sigs.k8s.io/controller-runtime v0.7.0 + sigs.k8s.io/kustomize/kyaml v0.10.5 sigs.k8s.io/yaml v1.2.0 ) diff --git a/go.sum b/go.sum index c1ecf376..b4012697 100644 --- a/go.sum +++ b/go.sum @@ -1,28 +1,34 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go 
v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/360EntSecGroup-Skylar/excelize v1.4.1/go.mod h1:vnax29X2usfl7HHkBrX5EvSCJcmH3dT9luvxzu8iGAE= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig/v3 v3.0.2/go.mod h1:oesJ8kPONMONaZgtiHNzUShJbksypC5kWczhZAf6+aU= -github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg= github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -33,12 +39,13 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2 h1:JCHLVE3B+kJd github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/StackExchange/wmi 
v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= @@ -48,72 +55,46 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= 
-github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= -github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/containerd/cgroups 
v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/pkg 
v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= -github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustmop/soup v1.1.2-0.20190516214245-38228baa104e/go.mod h1:CgNC6SGbT+Xb8wGGvzilttZL1mc5sQ/5KkcxsZttMIk= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -123,18 +104,15 @@ github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= -github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fluxcd/go-git-providers v0.0.2 h1:NGJeJl1TOJKbxtQkRL9JOk5lIopR1XNi6hGgZC5+8IE= -github.com/fluxcd/go-git-providers v0.0.2/go.mod h1:2Fp9GDxIcllNR7pm5clXhInPyue4VggecaH83KhkpNw= -github.com/fluxcd/kustomize-controller v0.0.1-beta.2/go.mod h1:mLeipvpQkyof6b5IHNtqeA8CmbjfVIf92UyKkpeBY98= -github.com/fluxcd/source-controller v0.0.1-beta.2/go.mod h1:tmscNdCxEt7+Xt2g1+bI38hMPw2leYMFAaCn4UlMGuw= -github.com/fluxcd/toolkit v0.0.1-beta.2 h1:JG80AUIGd936QJ6Vs/xZweoKcE6j7Loua5Wn6Q/pVh8= -github.com/fluxcd/toolkit v0.0.1-beta.2/go.mod h1:NqDXj2aeVMbVkrCHeP/r0um+edXXyeGlG/9pKZLqGdM= +github.com/fluxcd/go-git-providers v0.0.3 h1:pquQvTpd1a4V1efPyZWuVPeIKrTgV8QRoDY0VGH+qiw= +github.com/fluxcd/go-git-providers v0.0.3/go.mod h1:iaXf3nEq8MB/LzxfbNcCl48sAtIReUU7jqjJ7CEnfFQ= +github.com/fluxcd/pkg/ssh v0.0.5 h1:rnbFZ7voy2JBlUfMbfyqArX2FYaLNpDhccGFC3qW83A= +github.com/fluxcd/pkg/ssh v0.0.5/go.mod h1:7jXPdXZpc0ttMNz2kD9QuMi3RNn/e0DOFbj0Tij/+Hs= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= @@ -147,27 +125,28 @@ 
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp/pqnefH+Bc= -github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= -github.com/go-git/go-git/v5 v5.0.0/go.mod h1:oYD8y9kWsGINPFJoLdaScGCN6dlKg23blmClfZwtUVA= -github.com/go-git/go-git/v5 v5.1.0 h1:HxJn9g/E7eYvKW3Fm7Jt4ee8LXfPOm/H1cdDu8vEssk= -github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= +github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= +github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI= +github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= -github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= +github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= +github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -184,6 +163,8 @@ github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9 github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod 
h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9 h1:tF+augKRWlWx0J0B7ZyyKSiTyV6E1zZe+7b3qQlcEf8= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= @@ -192,6 +173,8 @@ github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -209,8 +192,8 @@ github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wab github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw= github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg= -github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= 
+github.com/go-openapi/spec v0.20.0 h1:HGLc8AJ7ynOxwv0Lq4TsnwLsWMawHAYiJIFzbcML86I= +github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= @@ -224,71 +207,42 @@ github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88d github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.12 h1:Bc0bnY2c3AoF7Gc+IMIAQQsD8fLHjHpc19wXvYuayQI= +github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= 
-github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= -github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= -github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= -github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf 
v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= -github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= -github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= -github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= -github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= -github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.21.0/go.mod h1:phxpHK52q7SE+5KpPnti4oZTdFCEsn/tKN+nFvCKXfk= -github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= -github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= -github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= 
-github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= -github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= -github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= -github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= -github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -297,32 +251,31 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-github/v32 v32.0.0 h1:q74KVb22spUq0U5HqZ9VCYqQz8YRuOtL/39ZnfwO+NM= -github.com/google/go-github/v32 v32.0.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= +github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz 
v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= -github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= +github.com/googleapis/gnostic 
v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -331,38 +284,36 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= -github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= 
-github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.4.0/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= @@ -371,10 +322,6 @@ github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 
-github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= @@ -382,9 +329,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -394,11 +341,6 @@ github.com/labstack/echo v3.3.10+incompatible h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8 github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s= github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= github.com/labstack/gommon v0.3.0/go.mod 
h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= -github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= -github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a h1:TpvdAwDAt1K4ANVOfcihouRdvP+MgAfDWwBuct4l6ZY= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -409,13 +351,12 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= -github.com/manifoldco/promptui v0.7.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ= -github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= @@ -425,35 +366,27 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod 
h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= @@ -463,44 +396,37 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= +github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= 
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= -github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/paulmach/orb v0.1.3/go.mod h1:VFlX/8C+IQ1p6FTRRKzKoOPJnvEtA5G0Veuqwbu//Vk= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pkg/errors v0.8.0/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -509,155 +435,150 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/qri-io/starlib v0.4.2-0.20200213133954-ff2e8cd5ef8d/go.mod h1:7DPO4domFU579Ga6E61sB9VFNaniPVwJP5C4bBCu3wA= -github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= -github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8= github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= 
-github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= -github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= -github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= -github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sosedoff/gitkit v0.2.1-0.20191202022816-7182d43c6254/go.mod h1:A+o6ZazfVJwetlcHz3ah6th66XcBdsyzLo+aBt/AsK4= -github.com/sourcegraph/go-diff 
v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.3-0.20181224173747-660f15d67dbb/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= 
-github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= -github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/xanzy/go-gitlab v0.33.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= 
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yujunz/go-getter v1.4.1-lite/go.mod h1:sbmqxXjyLunH1PkF3n7zSlnVeMvmYUuIl9ZVs/7NyCc= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.starlark.net v0.0.0-20190528202925-30ae18b8564f/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= 
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= +go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto 
v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp 
v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod 
h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -669,26 +590,32 @@ golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net 
v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -696,7 +623,6 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -704,9 +630,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -716,88 +639,116 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d h1:QQrM/CCYEzTs91GZylDCQjGHudbPTxF/1fvXdVh5lMo= -golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210108172913-0df2131ae363 h1:wHn06sgWHMO1VsQ8F+KzDJx/JzqfsNLnc+oEi07qD7s= +golang.org/x/sys v0.0.0-20210108172913-0df2131ae363/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 
h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= -golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff h1:XdBG6es/oFDr1HwaxkxgVve7NB281QhxgK/i4voubFs= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= -gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= -gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= +gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= +gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod 
h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -805,9 +756,12 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= @@ -820,7 +774,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 
v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= @@ -835,93 +788,58 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2 h1:XZx7nhd5GMaZpmDaEHFVafUZC7ya0fuo7cSJ3UCKYmM= -gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools 
v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -helm.sh/helm/v3 v3.1.2/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= -k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= -k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8= -k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= -k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8= -k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= -k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= -k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA= -k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= -k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= 
-k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= -k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI= -k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= -k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= -k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE= -k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= -k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= -k8s.io/code-generator v0.18.2 h1:C1Nn2JiMf244CvBDKVPX0W2mZFJkVBg54T8OV7/Imso= -k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= -k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y= -k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod 
h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk= -k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= -k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw= -k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= -rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8= -sigs.k8s.io/controller-runtime v0.6.0 h1:Fzna3DY7c4BIP6KwfSlrfnj20DJ+SeMBK8HSFvOk9NM= -sigs.k8s.io/controller-runtime v0.6.0/go.mod 
h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= -sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= -sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/kustomize/api v0.4.1/go.mod h1:NqxqT+wbYHrD0P19Uu4dXiMsVwI1IwQs+MJHlLhmPqQ= -sigs.k8s.io/kustomize/kyaml v0.1.11 h1:/VvWxVIgH5gG1K4A7trgbyLgO3tRBiAWNhLFVU1HEmo= -sigs.k8s.io/kustomize/kyaml v0.1.11/go.mod h1:72/rLkSi+L/pHM1oCjwrf3ClU+tH5kZQvvdLSqIHwWU= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= -sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= -sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= +k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= +k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA= +k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= +k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.19.6 h1:kBLzSGuDdY1NdSV2uFzI+FwZ9wtkmG+X3ZVcWXSqNgA= +k8s.io/apimachinery v0.19.6/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= +k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= +k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc= +k8s.io/client-go v0.19.2/go.mod 
h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= +k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs= +k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= +k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/controller-runtime v0.7.0 h1:bU20IBBEPccWz5+zXpLnpVsgBYxqclaHu1pVDl/gEt8= +sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= +sigs.k8s.io/kustomize/kyaml v0.10.5 h1:PbJcsZsEM7O3hHtUWTR+4WkHVbQRW9crSy75or1gRbI= +sigs.k8s.io/kustomize/kyaml v0.10.5/go.mod h1:P6Oy/ah/GZMKzJMIJA2a3/bc8YrBkuL5kJji13PSIzY= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod 
h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= -vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= From 00abe171b9a8241356b301a31019f1402b2d1d1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:47:27 +0200 Subject: [PATCH 087/149] Remove old pieces of the generated client code. --- cmd/sample-app/client/client.go | 61 ------- .../client/zz_generated.client_car.go | 152 ------------------ .../client/zz_generated.client_motorcycle.go | 152 ------------------ hack/generate-client.sh | 16 -- 4 files changed, 381 deletions(-) delete mode 100644 cmd/sample-app/client/client.go delete mode 100644 cmd/sample-app/client/zz_generated.client_car.go delete mode 100644 cmd/sample-app/client/zz_generated.client_motorcycle.go delete mode 100755 hack/generate-client.sh diff --git a/cmd/sample-app/client/client.go b/cmd/sample-app/client/client.go deleted file mode 100644 index e4d98247..00000000 --- a/cmd/sample-app/client/client.go +++ /dev/null @@ -1,61 +0,0 @@ -// TODO: Docs - -// +build ignore - -package client - -import ( - api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample" - "github.com/weaveworks/libgitops/pkg/client" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/storage" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// TODO: Autogenerate this! 
- -// NewClient creates a client for the specified storage -func NewClient(s storage.Storage) *Client { - return &Client{ - SampleInternalClient: NewSampleInternalClient(s), - } -} - -// Client is a struct providing high-level access to objects in a storage -// The resource-specific client interfaces are automatically generated based -// off client_resource_template.go. The auto-generation can be done with hack/client.sh -// At the moment SampleInternalClient is the default client. If more than this client -// is created in the future, the SampleInternalClient will be accessible under -// Client.SampleInternal() instead. -type Client struct { - *SampleInternalClient -} - -func NewSampleInternalClient(s storage.Storage) *SampleInternalClient { - return &SampleInternalClient{ - storage: s, - dynamicClients: map[schema.GroupVersionKind]client.DynamicClient{}, - gv: api.SchemeGroupVersion, - } -} - -type SampleInternalClient struct { - storage storage.Storage - gv schema.GroupVersion - carClient CarClient - motorcycleClient MotorcycleClient - dynamicClients map[schema.GroupVersionKind]client.DynamicClient -} - -// Dynamic returns the DynamicClient for the Client instance, for the specific kind -func (c *SampleInternalClient) Dynamic(kind runtime.Kind) (dc client.DynamicClient) { - var ok bool - gvk := c.gv.WithKind(kind.Title()) - if dc, ok = c.dynamicClients[gvk]; !ok { - dc = client.NewDynamicClient(c.storage, gvk) - c.dynamicClients[gvk] = dc - } - - return -} diff --git a/cmd/sample-app/client/zz_generated.client_car.go b/cmd/sample-app/client/zz_generated.client_car.go deleted file mode 100644 index 2661d453..00000000 --- a/cmd/sample-app/client/zz_generated.client_car.go +++ /dev/null @@ -1,152 +0,0 @@ -// +build ignore - -/* - Note: This file is autogenerated! Do not edit it manually! - Edit client_car_template.go instead, and run - hack/generate-client.sh afterwards. 
-*/ - -package client - -import ( - "fmt" - - api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample" - - log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/storage" - "github.com/weaveworks/libgitops/pkg/storage/filterer" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// CarClient is an interface for accessing Car-specific API objects -type CarClient interface { - // New returns a new Car - New() *api.Car - // Get returns the Car matching given UID from the storage - Get(runtime.UID) (*api.Car, error) - // Set saves the given Car into persistent storage - Set(*api.Car) error - // Patch performs a strategic merge patch on the object with - // the given UID, using the byte-encoded patch given - Patch(runtime.UID, []byte) error - // Find returns the Car matching the given filter, filters can - // match e.g. the Object's Name, UID or a specific property - Find(filter filterer.BaseFilter) (*api.Car, error) - // FindAll returns multiple Cars matching the given filter, filters can - // match e.g. 
the Object's Name, UID or a specific property - FindAll(filter filterer.BaseFilter) ([]*api.Car, error) - // Delete deletes the Car with the given UID from the storage - Delete(uid runtime.UID) error - // List returns a list of all Cars available - List() ([]*api.Car, error) -} - -// Cars returns the CarClient for the Client object -func (c *SampleInternalClient) Cars() CarClient { - if c.carClient == nil { - c.carClient = newCarClient(c.storage, c.gv) - } - - return c.carClient -} - -// carClient is a struct implementing the CarClient interface -// It uses a shared storage instance passed from the Client together with its own Filterer -type carClient struct { - storage storage.Storage - filterer *filterer.Filterer - gvk schema.GroupVersionKind -} - -// newCarClient builds the carClient struct using the storage implementation and a new Filterer -func newCarClient(s storage.Storage, gv schema.GroupVersion) CarClient { - return &carClient{ - storage: s, - filterer: filterer.NewFilterer(s), - gvk: gv.WithKind(api.KindCar.Title()), - } -} - -// New returns a new Object of its kind -func (c *carClient) New() *api.Car { - log.Tracef("Client.New; GVK: %v", c.gvk) - obj, err := c.storage.New(c.gvk) - if err != nil { - panic(fmt.Sprintf("Client.New must not return an error: %v", err)) - } - return obj.(*api.Car) -} - -// Find returns a single Car based on the given Filter -func (c *carClient) Find(filter filterer.BaseFilter) (*api.Car, error) { - log.Tracef("Client.Find; GVK: %v", c.gvk) - object, err := c.filterer.Find(c.gvk, filter) - if err != nil { - return nil, err - } - - return object.(*api.Car), nil -} - -// FindAll returns multiple Cars based on the given Filter -func (c *carClient) FindAll(filter filterer.BaseFilter) ([]*api.Car, error) { - log.Tracef("Client.FindAll; GVK: %v", c.gvk) - matches, err := c.filterer.FindAll(c.gvk, filter) - if err != nil { - return nil, err - } - - results := make([]*api.Car, 0, len(matches)) - for _, item := range matches { - 
results = append(results, item.(*api.Car)) - } - - return results, nil -} - -// Get returns the Car matching given UID from the storage -func (c *carClient) Get(uid runtime.UID) (*api.Car, error) { - log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk) - object, err := c.storage.Get(c.gvk, uid) - if err != nil { - return nil, err - } - - return object.(*api.Car), nil -} - -// Set saves the given Car into the persistent storage -func (c *carClient) Set(car *api.Car) error { - log.Tracef("Client.Set; UID: %q, GVK: %v", car.GetUID(), c.gvk) - return c.storage.Set(c.gvk, car) -} - -// Patch performs a strategic merge patch on the object with -// the given UID, using the byte-encoded patch given -func (c *carClient) Patch(uid runtime.UID, patch []byte) error { - return c.storage.Patch(c.gvk, uid, patch) -} - -// Delete deletes the Car from the storage -func (c *carClient) Delete(uid runtime.UID) error { - log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk) - return c.storage.Delete(c.gvk, uid) -} - -// List returns a list of all Cars available -func (c *carClient) List() ([]*api.Car, error) { - log.Tracef("Client.List; GVK: %v", c.gvk) - list, err := c.storage.List(c.gvk) - if err != nil { - return nil, err - } - - results := make([]*api.Car, 0, len(list)) - for _, item := range list { - results = append(results, item.(*api.Car)) - } - - return results, nil -} diff --git a/cmd/sample-app/client/zz_generated.client_motorcycle.go b/cmd/sample-app/client/zz_generated.client_motorcycle.go deleted file mode 100644 index 7256e003..00000000 --- a/cmd/sample-app/client/zz_generated.client_motorcycle.go +++ /dev/null @@ -1,152 +0,0 @@ -// +build ignore - -/* - Note: This file is autogenerated! Do not edit it manually! - Edit client_motorcycle_template.go instead, and run - hack/generate-client.sh afterwards. 
-*/ - -package client - -import ( - "fmt" - - api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample" - - log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/runtime" - "github.com/weaveworks/libgitops/pkg/storage" - "github.com/weaveworks/libgitops/pkg/storage/filterer" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// MotorcycleClient is an interface for accessing Motorcycle-specific API objects -type MotorcycleClient interface { - // New returns a new Motorcycle - New() *api.Motorcycle - // Get returns the Motorcycle matching given UID from the storage - Get(runtime.UID) (*api.Motorcycle, error) - // Set saves the given Motorcycle into persistent storage - Set(*api.Motorcycle) error - // Patch performs a strategic merge patch on the object with - // the given UID, using the byte-encoded patch given - Patch(runtime.UID, []byte) error - // Find returns the Motorcycle matching the given filter, filters can - // match e.g. the Object's Name, UID or a specific property - Find(filter filterer.BaseFilter) (*api.Motorcycle, error) - // FindAll returns multiple Motorcycles matching the given filter, filters can - // match e.g. 
the Object's Name, UID or a specific property - FindAll(filter filterer.BaseFilter) ([]*api.Motorcycle, error) - // Delete deletes the Motorcycle with the given UID from the storage - Delete(uid runtime.UID) error - // List returns a list of all Motorcycles available - List() ([]*api.Motorcycle, error) -} - -// Motorcycles returns the MotorcycleClient for the Client object -func (c *SampleInternalClient) Motorcycles() MotorcycleClient { - if c.motorcycleClient == nil { - c.motorcycleClient = newMotorcycleClient(c.storage, c.gv) - } - - return c.motorcycleClient -} - -// motorcycleClient is a struct implementing the MotorcycleClient interface -// It uses a shared storage instance passed from the Client together with its own Filterer -type motorcycleClient struct { - storage storage.Storage - filterer *filterer.Filterer - gvk schema.GroupVersionKind -} - -// newMotorcycleClient builds the motorcycleClient struct using the storage implementation and a new Filterer -func newMotorcycleClient(s storage.Storage, gv schema.GroupVersion) MotorcycleClient { - return &motorcycleClient{ - storage: s, - filterer: filterer.NewFilterer(s), - gvk: gv.WithKind(api.KindMotorcycle.Title()), - } -} - -// New returns a new Object of its kind -func (c *motorcycleClient) New() *api.Motorcycle { - log.Tracef("Client.New; GVK: %v", c.gvk) - obj, err := c.storage.New(c.gvk) - if err != nil { - panic(fmt.Sprintf("Client.New must not return an error: %v", err)) - } - return obj.(*api.Motorcycle) -} - -// Find returns a single Motorcycle based on the given Filter -func (c *motorcycleClient) Find(filter filterer.BaseFilter) (*api.Motorcycle, error) { - log.Tracef("Client.Find; GVK: %v", c.gvk) - object, err := c.filterer.Find(c.gvk, filter) - if err != nil { - return nil, err - } - - return object.(*api.Motorcycle), nil -} - -// FindAll returns multiple Motorcycles based on the given Filter -func (c *motorcycleClient) FindAll(filter filterer.BaseFilter) ([]*api.Motorcycle, error) { - 
log.Tracef("Client.FindAll; GVK: %v", c.gvk) - matches, err := c.filterer.FindAll(c.gvk, filter) - if err != nil { - return nil, err - } - - results := make([]*api.Motorcycle, 0, len(matches)) - for _, item := range matches { - results = append(results, item.(*api.Motorcycle)) - } - - return results, nil -} - -// Get returns the Motorcycle matching given UID from the storage -func (c *motorcycleClient) Get(uid runtime.UID) (*api.Motorcycle, error) { - log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk) - object, err := c.storage.Get(c.gvk, uid) - if err != nil { - return nil, err - } - - return object.(*api.Motorcycle), nil -} - -// Set saves the given Motorcycle into the persistent storage -func (c *motorcycleClient) Set(motorcycle *api.Motorcycle) error { - log.Tracef("Client.Set; UID: %q, GVK: %v", motorcycle.GetUID(), c.gvk) - return c.storage.Set(c.gvk, motorcycle) -} - -// Patch performs a strategic merge patch on the object with -// the given UID, using the byte-encoded patch given -func (c *motorcycleClient) Patch(uid runtime.UID, patch []byte) error { - return c.storage.Patch(c.gvk, uid, patch) -} - -// Delete deletes the Motorcycle from the storage -func (c *motorcycleClient) Delete(uid runtime.UID) error { - log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk) - return c.storage.Delete(c.gvk, uid) -} - -// List returns a list of all Motorcycles available -func (c *motorcycleClient) List() ([]*api.Motorcycle, error) { - log.Tracef("Client.List; GVK: %v", c.gvk) - list, err := c.storage.List(c.gvk) - if err != nil { - return nil, err - } - - results := make([]*api.Motorcycle, 0, len(list)) - for _, item := range list { - results = append(results, item.(*api.Motorcycle)) - } - - return results, nil -} diff --git a/hack/generate-client.sh b/hack/generate-client.sh deleted file mode 100755 index b7e5853d..00000000 --- a/hack/generate-client.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -SCRIPT_DIR=$( dirname "${BASH_SOURCE[0]}" ) -cd 
${SCRIPT_DIR}/.. - -RESOURCES="Car Motorcycle" -CLIENT_NAME=SampleInternal -OUT_DIR=cmd/sample-app/client -API_DIR="github.com/weaveworks/libgitops/cmd/sample-app/apis/sample" -mkdir -p ${OUT_DIR} -for Resource in ${RESOURCES}; do - resource=$(echo "${Resource}" | awk '{print tolower($0)}') - sed -e "s|Resource|${Resource}|g;s|resource|${resource}|g;/build ignore/d;s|API_DIR|${API_DIR}|g;s|*Client|*${CLIENT_NAME}Client|g" \ - pkg/client/client_resource_template.go > \ - ${OUT_DIR}/zz_generated.client_${resource}.go -done From affc5a84c567c290f28c34266131e1eb79054157 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:47:40 +0200 Subject: [PATCH 088/149] make tidy --- go.mod | 1 - 1 file changed, 1 deletion(-) diff --git a/go.mod b/go.mod index 936a5a0d..19bb9bbf 100644 --- a/go.mod +++ b/go.mod @@ -26,5 +26,4 @@ require ( k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 sigs.k8s.io/controller-runtime v0.7.0 sigs.k8s.io/kustomize/kyaml v0.10.5 - sigs.k8s.io/yaml v1.2.0 ) From 46acb8f00713a7fee6a880014c59083537c7bc6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:47:49 +0200 Subject: [PATCH 089/149] Fixup the makefile a bit --- Makefile | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 4f3230b9..1f2c88b7 100644 --- a/Makefile +++ b/Makefile @@ -1,5 +1,5 @@ UID_GID ?= $(shell id -u):$(shell id -g) -GO_VERSION ?= 1.14.4 +GO_VERSION ?= 1.15.6 GIT_VERSION := $(shell hack/ldflags.sh --version-only) PROJECT := github.com/weaveworks/libgitops BOUNDING_API_DIRS := ${PROJECT}/cmd/apis/sample @@ -7,7 +7,6 @@ API_DIRS := ${PROJECT}/cmd/sample-app/apis/sample,${PROJECT}/cmd/sample-app/apis SRC_PKGS := cmd pkg DOCKER_ARGS := --rm CACHE_DIR := $(shell pwd)/bin/cache -API_DOCS := api/sample-app.md api/runtime.md BINARIES := bin/sample-app bin/sample-gitops bin/sample-watch # If we're not running in CI, run Docker interactively @@ -39,7 
+38,6 @@ test-internal: tidy: docker-tidy-internal tidy-internal: /go/bin/goimports go mod tidy - hack/generate-client.sh gofmt -s -w ${SRC_PKGS} goimports -w ${SRC_PKGS} From 850d735fd678d4279dedf6590d7f2248c03934a8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:56:37 +0200 Subject: [PATCH 090/149] Get rid of the separate util package --- pkg/serializer/options.go | 33 ++++++------ pkg/storage/client/transactional/client.go | 15 +++++- pkg/util/fs.go | 23 --------- pkg/util/util.go | 58 ---------------------- 4 files changed, 30 insertions(+), 99 deletions(-) delete mode 100644 pkg/util/fs.go delete mode 100644 pkg/util/util.go diff --git a/pkg/serializer/options.go b/pkg/serializer/options.go index 6f56e8e9..e5736e62 100644 --- a/pkg/serializer/options.go +++ b/pkg/serializer/options.go @@ -1,6 +1,8 @@ package serializer -import "github.com/weaveworks/libgitops/pkg/util" +import ( + "k8s.io/utils/pointer" +) // TODO: Import k8s.io/utils/pointer instead of baking our own ptrutils package. @@ -11,7 +13,7 @@ type EncodeOption interface { func defaultEncodeOpts() *EncodeOptions { return &EncodeOptions{ // Default to "pretty encoding" - JSONIndent: util.IntPtr(2), + JSONIndent: pointer.Int32Ptr(2), PreserveComments: PreserveCommentsDisable, } } @@ -23,8 +25,7 @@ type EncodeOptions struct { // // Default: 2, i.e. pretty output // TODO: Make this a property of the FrameWriter instead? - // TODO: Use a typed size of the int, e.g. int32? - JSONIndent *int + JSONIndent *int32 // Whether to preserve YAML comments internally. // This only works for objects embedding metav1.ObjectMeta. @@ -88,12 +89,12 @@ func (p PreserveComments) ApplyToDecode(target *DecodeOptions) { // Indent JSON encoding output with this many spaces. // Use PrettyEncode(false) or JSONIndent(0) to disable pretty output. // Only applicable to ContentTypeJSON framers. 
-type JSONIndent int +type JSONIndent int32 var _ EncodeOption = JSONIndent(0) func (i JSONIndent) ApplyToEncode(target *EncodeOptions) { - target.JSONIndent = util.IntPtr(int(i)) + target.JSONIndent = pointer.Int32Ptr(int32(i)) } // Shorthand for JSONIndent(0) if false, or JSONIndent(2) if true @@ -117,12 +118,12 @@ type DecodeOption interface { func defaultDecodeOpts() *DecodeOptions { return &DecodeOptions{ - ConvertToHub: util.BoolPtr(false), - Strict: util.BoolPtr(true), - Default: util.BoolPtr(false), - DecodeListElements: util.BoolPtr(true), + ConvertToHub: pointer.BoolPtr(false), + Strict: pointer.BoolPtr(true), + Default: pointer.BoolPtr(false), + DecodeListElements: pointer.BoolPtr(true), PreserveComments: PreserveCommentsDisable, - DecodeUnknown: util.BoolPtr(false), + DecodeUnknown: pointer.BoolPtr(false), } } @@ -210,7 +211,7 @@ type ConvertToHub bool var _ DecodeOption = ConvertToHub(false) func (b ConvertToHub) ApplyToDecode(target *DecodeOptions) { - target.ConvertToHub = util.BoolPtr(bool(b)) + target.ConvertToHub = pointer.BoolPtr(bool(b)) } // Parse the YAML/JSON in strict mode, returning a specific error if the input @@ -220,7 +221,7 @@ type DecodeStrict bool var _ DecodeOption = DecodeStrict(false) func (b DecodeStrict) ApplyToDecode(target *DecodeOptions) { - target.Strict = util.BoolPtr(bool(b)) + target.Strict = pointer.BoolPtr(bool(b)) } // Automatically default the decoded object. @@ -229,7 +230,7 @@ type DefaultAtDecode bool var _ DecodeOption = DefaultAtDecode(false) func (b DefaultAtDecode) ApplyToDecode(target *DecodeOptions) { - target.Default = util.BoolPtr(bool(b)) + target.Default = pointer.BoolPtr(bool(b)) } // Only applicable for Decoder.DecodeAll(). 
If the underlying data contains a v1.List, @@ -242,7 +243,7 @@ type DecodeListElements bool var _ DecodeOption = DecodeListElements(false) func (b DecodeListElements) ApplyToDecode(target *DecodeOptions) { - target.DecodeListElements = util.BoolPtr(bool(b)) + target.DecodeListElements = pointer.BoolPtr(bool(b)) } // DecodeUnknown specifies whether decode objects with an unknown GroupVersionKind into a @@ -253,5 +254,5 @@ type DecodeUnknown bool var _ DecodeOption = DecodeUnknown(false) func (b DecodeUnknown) ApplyToDecode(target *DecodeOptions) { - target.DecodeUnknown = util.BoolPtr(bool(b)) + target.DecodeUnknown = pointer.BoolPtr(bool(b)) } diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index 0c75cdce..1108c1d7 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -2,6 +2,8 @@ package transactional import ( "context" + "crypto/rand" + "encoding/hex" "fmt" "strings" "sync" @@ -11,7 +13,6 @@ import ( "github.com/weaveworks/libgitops/pkg/storage/backend" "github.com/weaveworks/libgitops/pkg/storage/client" "github.com/weaveworks/libgitops/pkg/storage/core" - "github.com/weaveworks/libgitops/pkg/util" utilerrs "k8s.io/apimachinery/pkg/util/errors" ) @@ -271,7 +272,7 @@ func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts // Append random bytes to the end of the head branch if it ends with a dash if strings.HasSuffix(headBranch, "-") { - suffix, err := util.RandomSHA(4) + suffix, err := randomSHA(4) if err != nil { return nil, err } @@ -317,3 +318,13 @@ func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts merger: c.merger, }, nil } + +// randomSHA returns a hex-encoded string from {byteLen} random bytes. 
+func randomSHA(byteLen int) (string, error) { + b := make([]byte, byteLen) + _, err := rand.Read(b) + if err != nil { + return "", err + } + return hex.EncodeToString(b), nil +} diff --git a/pkg/util/fs.go b/pkg/util/fs.go deleted file mode 100644 index 3e1f7d45..00000000 --- a/pkg/util/fs.go +++ /dev/null @@ -1,23 +0,0 @@ -package util - -import ( - "os" -) - -func PathExists(path string) (bool, os.FileInfo) { - info, err := os.Stat(path) - if os.IsNotExist(err) { - return false, nil - } - - return true, info -} - -func FileExists(filename string) bool { - exists, info := PathExists(filename) - if !exists { - return false - } - - return !info.IsDir() -} diff --git a/pkg/util/util.go b/pkg/util/util.go deleted file mode 100644 index ab844bfb..00000000 --- a/pkg/util/util.go +++ /dev/null @@ -1,58 +0,0 @@ -package util - -import ( - "bytes" - "crypto/rand" - "encoding/hex" - "fmt" - "os/exec" - "strings" -) - -func ExecuteCommand(command string, args ...string) (string, error) { - cmd := exec.Command(command, args...) - out, err := cmd.CombinedOutput() - if err != nil { - return "", fmt.Errorf("command %q exited with %q: %v", cmd.Args, out, err) - } - - return string(bytes.TrimSpace(out)), nil -} - -func MatchPrefix(prefix string, fields ...string) ([]string, bool) { - var prefixMatches, exactMatches []string - - for _, str := range fields { - if str == prefix { - exactMatches = append(exactMatches, str) - } else if strings.HasPrefix(str, prefix) { - prefixMatches = append(prefixMatches, str) - } - } - - // If we have exact matches, return them - // and set the exact match boolean - if len(exactMatches) > 0 { - return exactMatches, true - } - - return prefixMatches, false -} - -func BoolPtr(b bool) *bool { - return &b -} - -func IntPtr(i int) *int { - return &i -} - -// RandomSHA returns a hex-encoded string from {byteLen} random bytes. 
-func RandomSHA(byteLen int) (string, error) { - b := make([]byte, byteLen) - _, err := rand.Read(b) - if err != nil { - return "", err - } - return hex.EncodeToString(b), nil -} From f2aefd6d9a45d0fb8464313259590d06f8a9b551 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 27 Jan 2021 23:56:52 +0200 Subject: [PATCH 091/149] go mod update --- go.mod | 1 + 1 file changed, 1 insertion(+) diff --git a/go.mod b/go.mod index 19bb9bbf..499f4821 100644 --- a/go.mod +++ b/go.mod @@ -24,6 +24,7 @@ require ( k8s.io/api v0.19.2 k8s.io/apimachinery v0.19.6 k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 + k8s.io/utils v0.0.0-20200912215256-4140de9c8800 sigs.k8s.io/controller-runtime v0.7.0 sigs.k8s.io/kustomize/kyaml v0.10.5 ) From e8c403831c515e622e3e4825268693420f72a47c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 29 Jan 2021 17:09:23 +0200 Subject: [PATCH 092/149] Make the serializer system more pluggable and "open". --- cmd/sample-app/main.go | 6 ++- cmd/sample-gitops/main.go | 10 +++-- cmd/sample-watch/main.go | 10 +++-- pkg/serializer/convertor.go | 16 +++++--- pkg/serializer/decode.go | 35 +++++++++------- pkg/serializer/defaulter.go | 28 ++++++++----- pkg/serializer/encode.go | 29 +++++++++---- pkg/serializer/patch.go | 36 ++++++++++++---- pkg/serializer/serializer.go | 75 ++++++++++++++++++---------------- pkg/serializer/utils.go | 30 ++++++++++++++ pkg/storage/backend/backend.go | 35 ++++++++++------ pkg/storage/client/client.go | 12 +++--- pkg/storage/core/recognizer.go | 22 +++++----- 13 files changed, 223 insertions(+), 121 deletions(-) diff --git a/cmd/sample-app/main.go b/cmd/sample-app/main.go index 2812acc3..f915d941 100644 --- a/cmd/sample-app/main.go +++ b/cmd/sample-app/main.go @@ -63,12 +63,14 @@ func run(manifestDir string) error { return err } - b, err := backend.NewGeneric(s, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil) + encoder := scheme.Serializer.Encoder() + 
decoder := scheme.Serializer.Decoder() + b, err := backend.NewGeneric(s, encoder, decoder, kube.NewNamespaceEnforcer(), nil, nil) if err != nil { return err } - plainClient, err := client.NewGeneric(b, scheme.Serializer.Patcher()) + plainClient, err := client.NewGeneric(b) if err != nil { return err } diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go index d18a9d55..45820349 100644 --- a/cmd/sample-gitops/main.go +++ b/cmd/sample-gitops/main.go @@ -143,11 +143,15 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str return err } + // Just use default encoders and decoders + encoder := scheme.Serializer.Encoder() + decoder := scheme.Serializer.Decoder() + rawManifest, err := unstructuredevent.NewManifest( localClone.Dir(), filesystem.DefaultContentTyper, core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced - &core.SerializerObjectRecognizer{Serializer: scheme.Serializer}, + &core.KubeObjectRecognizer{Decoder: decoder}, filesystem.DefaultPathExcluders(), ) if err != nil { @@ -162,12 +166,12 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str defer func() { _ = rawManifest.Close() }() - b, err := backend.NewGeneric(rawManifest, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil) + b, err := backend.NewGeneric(rawManifest, encoder, decoder, kube.NewNamespaceEnforcer(), nil, nil) if err != nil { return err } - gitClient, err := client.NewGeneric(b, scheme.Serializer.Patcher()) + gitClient, err := client.NewGeneric(b) if err != nil { return err } diff --git a/cmd/sample-watch/main.go b/cmd/sample-watch/main.go index c81a279a..11f5c553 100644 --- a/cmd/sample-watch/main.go +++ b/cmd/sample-watch/main.go @@ -48,11 +48,15 @@ func run(watchDir string) error { ctx := context.Background() + // Just use default encoders and decoders + encoder := scheme.Serializer.Encoder() + decoder := scheme.Serializer.Decoder() + rawManifest, err := 
unstructuredevent.NewManifest( watchDir, filesystem.DefaultContentTyper, core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced - &core.SerializerObjectRecognizer{Serializer: scheme.Serializer}, + &core.KubeObjectRecognizer{Decoder: decoder}, filesystem.DefaultPathExcluders(), ) if err != nil { @@ -65,12 +69,12 @@ func run(watchDir string) error { return err } - b, err := backend.NewGeneric(rawManifest, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil) + b, err := backend.NewGeneric(rawManifest, encoder, decoder, kube.NewNamespaceEnforcer(), nil, nil) if err != nil { return err } - watchStorage, err := client.NewGeneric(b, scheme.Serializer.Patcher()) + watchStorage, err := client.NewGeneric(b) if err != nil { return err } diff --git a/pkg/serializer/convertor.go b/pkg/serializer/convertor.go index 3fbc814f..f6368306 100644 --- a/pkg/serializer/convertor.go +++ b/pkg/serializer/convertor.go @@ -19,19 +19,25 @@ var ( errObjMustNotBeBoth = errors.New("given object must not implement both the Convertible and Hub interfaces") ) -func newConverter(scheme *runtime.Scheme) *converter { +func NewConverter(schemeLock LockedScheme) *converter { return &converter{ - scheme: scheme, - convertor: newObjectConvertor(scheme, true), + LockedScheme: schemeLock, + convertor: newObjectConvertor(schemeLock.Scheme(), true), } } // converter implements the Converter interface +// TODO: This implementation should support converting from a +// convertible to an other convertible through the Hub type converter struct { - scheme *runtime.Scheme + LockedScheme convertor *objectConvertor } +func (c *converter) SchemeLock() LockedScheme { + return c.LockedScheme +} + // Convert converts in directly into out. out should be an empty object of the destination type. 
// Both objects must be of the same kind and either have autogenerated conversions registered, or // be controller-runtime CRD-style implementers of the sigs.k8s.io/controller-runtime/pkg/conversion.Hub @@ -46,7 +52,7 @@ func (c *converter) Convert(in, out runtime.Object) error { // TODO: If needed, this function could only accept a GroupVersion, not GroupVersionKind func (c *converter) ConvertIntoNew(in runtime.Object, gvk schema.GroupVersionKind) (runtime.Object, error) { // Create a new object of the given gvk - obj, err := c.scheme.New(gvk) + obj, err := c.Scheme().New(gvk) if err != nil { return nil, err } diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go index 7aee5af4..dd496abb 100644 --- a/pkg/serializer/decode.go +++ b/pkg/serializer/decode.go @@ -20,25 +20,32 @@ var listGVK = metav1.Unversioned.WithKind("List") // as a variadic-sized Option slice? It would probably take caching the *json.Serializer // and runtime.Decoder for the given options they use, though. -func newDecoder(schemeAndCodec *schemeAndCodec, opts DecodeOptions) Decoder { +func NewDecoder(schemeLock LockedScheme, opts ...DecodeOption) Decoder { + // Make the options struct + o := *defaultDecodeOpts().ApplyOptions(opts) + // Allow both YAML and JSON inputs (JSON is a subset of YAML), and deserialize in strict mode - s := json.NewSerializerWithOptions(json.DefaultMetaFactory, schemeAndCodec.scheme, schemeAndCodec.scheme, json.SerializerOptions{ + s := json.NewSerializerWithOptions(json.DefaultMetaFactory, schemeLock.Scheme(), schemeLock.Scheme(), json.SerializerOptions{ Yaml: true, - Strict: *opts.Strict, + Strict: *o.Strict, }) - decodeCodec := decoderForVersion(schemeAndCodec.scheme, s, *opts.Default, *opts.ConvertToHub) + decodeCodec := decoderForVersion(schemeLock.Scheme(), s, *o.Default, *o.ConvertToHub) - return &decoder{schemeAndCodec, decodeCodec, opts} + return &decoder{schemeLock, decodeCodec, o} } type decoder struct { - *schemeAndCodec + LockedScheme decoder 
runtime.Decoder opts DecodeOptions } +func (d *decoder) SchemeLock() LockedScheme { + return d.LockedScheme +} + // Decode returns the decoded object from the next document in the FrameReader stream. // If there are multiple documents in the underlying stream, this call will read one // document and return it. Decode might be invoked for getting new documents until it @@ -71,11 +78,11 @@ func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runti if *d.opts.DecodeListElements { // As .AddKnownTypes is writing to the scheme, make sure we guard the check and the write with a // mutex. - d.schemeMu.Lock() - if !d.scheme.Recognizes(listGVK) { - d.scheme.AddKnownTypes(metav1.Unversioned, &metav1.List{}) + d.SchemeLock() + if !d.Scheme().Recognizes(listGVK) { + d.Scheme().AddKnownTypes(metav1.Unversioned, &metav1.List{}) } - d.schemeMu.Unlock() + d.SchemeUnlock() } // Record if this decode call should have runtime.DecodeInto-functionality @@ -201,18 +208,18 @@ func (d *decoder) handleDecodeError(doc []byte, origErr error) error { // TODO: Unit test that typed errors are returned properly // Check if the group was known. 
If not, return that specific error - if !d.scheme.IsGroupRegistered(gvk.Group) { + if !d.Scheme().IsGroupRegistered(gvk.Group) { return NewUnrecognizedGroupError(*gvk, origErr) } // Return a structured error if the group was registered with the scheme but the version was unrecognized - if !d.scheme.IsVersionRegistered(gvk.GroupVersion()) { - gvs := d.scheme.PrioritizedVersionsForGroup(gvk.Group) + if !d.Scheme().IsVersionRegistered(gvk.GroupVersion()) { + gvs := d.Scheme().PrioritizedVersionsForGroup(gvk.Group) return NewUnrecognizedVersionError(gvs, *gvk, origErr) } // Return a structured error if the kind is not known - if !d.scheme.Recognizes(*gvk) { + if !d.Scheme().Recognizes(*gvk) { return NewUnrecognizedKindError(*gvk, origErr) } diff --git a/pkg/serializer/defaulter.go b/pkg/serializer/defaulter.go index 6ff0ad8e..d0b117f3 100644 --- a/pkg/serializer/defaulter.go +++ b/pkg/serializer/defaulter.go @@ -6,19 +6,25 @@ import ( "k8s.io/apimachinery/pkg/util/errors" ) -func newDefaulter(scheme *runtime.Scheme) *defaulter { - return &defaulter{scheme} +func NewDefaulter(schemeLock LockedScheme) Defaulter { + // We do not write to the scheme in the defaulter at this time. + // If we start doing that, we must also make use of the locker + return &defaulter{schemeLock} } type defaulter struct { - scheme *runtime.Scheme + LockedScheme +} + +func (d *defaulter) SchemeLock() LockedScheme { + return d.LockedScheme } // NewDefaultedObject returns a new, defaulted object. It is essentially scheme.New() and // scheme.Default(obj), but with extra logic to also cover internal versions. // Important to note here is that the TypeMeta information is NOT applied automatically. 
func (d *defaulter) NewDefaultedObject(gvk schema.GroupVersionKind) (runtime.Object, error) { - obj, err := d.scheme.New(gvk) + obj, err := d.Scheme().New(gvk) if err != nil { return nil, err } @@ -41,36 +47,36 @@ func (d *defaulter) Default(objs ...runtime.Object) error { func (d *defaulter) runDefaulting(obj runtime.Object) error { // First, get the groupversionkind of the object - gvk, err := GVKForObject(d.scheme, obj) + gvk, err := GVKForObject(d.Scheme(), obj) if err != nil { return err } // If the version is external, just default it and return. if gvk.Version != runtime.APIVersionInternal { - d.scheme.Default(obj) + d.Scheme().Default(obj) return nil } // We know that the current object is internal // Get the preferred external version... - gv, err := prioritizedVersionForGroup(d.scheme, gvk.Group) + gv, err := prioritizedVersionForGroup(d.Scheme(), gvk.Group) if err != nil { return err } // ...and make a new object of it - external, err := d.scheme.New(gv.WithKind(gvk.Kind)) + external, err := d.Scheme().New(gv.WithKind(gvk.Kind)) if err != nil { return err } // Convert the internal object to the external - if err := d.scheme.Convert(obj, external, nil); err != nil { + if err := d.Scheme().Convert(obj, external, nil); err != nil { return err } // Default the external - d.scheme.Default(external) + d.Scheme().Default(external) // And convert back to internal - return d.scheme.Convert(external, obj, nil) + return d.Scheme().Convert(external, obj, nil) } diff --git a/pkg/serializer/encode.go b/pkg/serializer/encode.go index a06bd8ca..ae46760b 100644 --- a/pkg/serializer/encode.go +++ b/pkg/serializer/encode.go @@ -7,21 +7,32 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + k8sserializer "k8s.io/apimachinery/pkg/runtime/serializer" ) -func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodeOptions) Encoder { +func NewEncoder(schemeLock LockedScheme, codecs *k8sserializer.CodecFactory, opts ...EncodeOption) Encoder { 
return &encoder{ - schemeAndCodec, - opts, + LockedScheme: schemeLock, + codecs: codecs, + opts: *defaultEncodeOpts().ApplyOptions(opts), } } type encoder struct { - *schemeAndCodec + LockedScheme + codecs *k8sserializer.CodecFactory opts EncodeOptions } +func (e *encoder) SchemeLock() LockedScheme { + return e.LockedScheme +} + +func (e *encoder) CodecFactory() *k8sserializer.CodecFactory { + return e.codecs +} + // Encode encodes the given objects and writes them to the specified FrameWriter. // The FrameWriter specifies the ContentType. This encoder will automatically convert any // internal object given to the preferred external groupversion. No conversion will happen @@ -31,14 +42,14 @@ type encoder struct { func (e *encoder) Encode(fw FrameWriter, objs ...runtime.Object) error { for _, obj := range objs { // Get the kind for the given object - gvk, err := GVKForObject(e.scheme, obj) + gvk, err := GVKForObject(e.Scheme(), obj) if err != nil { return err } // If the object is internal, convert it to the preferred external one if gvk.Version == runtime.APIVersionInternal { - gv, err := prioritizedVersionForGroup(e.scheme, gvk.Group) + gv, err := prioritizedVersionForGroup(e.Scheme(), gvk.Group) if err != nil { return err } @@ -72,7 +83,7 @@ func (e *encoder) EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv s encoder := serializerInfo.Serializer // Get a version-specific encoder for the specified groupversion - versionEncoder := encoderForVersion(e.scheme, encoder, gv) + versionEncoder := encoderForVersion(e.Scheme(), encoder, gv) // Check if the user requested prettified JSON output. // If the ContentType is JSON this is ok, we will intent the encode output on the fly. 
@@ -105,14 +116,14 @@ func encoderForVersion(scheme *runtime.Scheme, encoder runtime.Encoder, gv schem } type jsonPrettyFrameWriter struct { - indent int + indent int32 fw FrameWriter } func (w *jsonPrettyFrameWriter) Write(p []byte) (n int, err error) { // Indent the source bytes var indented bytes.Buffer - err = json.Indent(&indented, p, "", strings.Repeat(" ", w.indent)) + err = json.Indent(&indented, p, "", strings.Repeat(" ", int(w.indent))) if err != nil { return } diff --git a/pkg/serializer/patch.go b/pkg/serializer/patch.go index bd580e05..fdf58154 100644 --- a/pkg/serializer/patch.go +++ b/pkg/serializer/patch.go @@ -35,10 +35,32 @@ type Patcher interface { // If knowledge about the schema is required by the patch type (e.g. StrategicMergePatch), // it is the liability of the caller to provide an OpenAPI schema. ApplyOnUnstructured(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Unstructured, schema openapi.Schema) error + + // Encoder gets the underlying Encoder + Encoder() Encoder + + // Decoder gets the underlying Decoder + Decoder() Decoder +} + +func NewPatcher(encoder Encoder, decoder Decoder) Patcher { + // It shouldn't matter if we use the LockedScheme from the encoder or decoder + // TODO: Does this work with pretty encoders? + return &patcher{encoder.SchemeLock(), encoder, decoder} } type patcher struct { - *schemeAndCodec + LockedScheme + encoder Encoder + decoder Decoder +} + +func (p *patcher) Encoder() Encoder { + return p.encoder +} + +func (p *patcher) Decoder() Decoder { + return p.decoder } // ApplyOnStruct applies the given patch (JSON-encoded) using the given BytePatcher @@ -56,11 +78,11 @@ type patcher struct { // this function looks that metadata up using reflection of obj. 
func (p *patcher) ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Object) error { // Require that obj is typed - if !IsTyped(obj, p.scheme) { + if !IsTyped(obj, p.Scheme()) { return errors.New("obj must be typed") } // Get the GVK so we can check if obj is internal - gvk, err := GVKForObject(p.scheme, obj) + gvk, err := GVKForObject(p.Scheme(), obj) if err != nil { return err } @@ -69,12 +91,9 @@ func (p *patcher) ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj return errors.New("obj must not be internal") } - // Create a non-pretty encoder - encopt := *defaultEncodeOpts().ApplyOptions([]EncodeOption{PrettyEncode(false)}) - enc := newEncoder(p.schemeAndCodec, encopt) // Encode without conversion to the buffer var buf bytes.Buffer - if err := enc.EncodeForGroupVersion(NewJSONFrameWriter(&buf), obj, gvk.GroupVersion()); err != nil { + if err := p.encoder.EncodeForGroupVersion(NewJSONFrameWriter(&buf), obj, gvk.GroupVersion()); err != nil { return err } @@ -92,8 +111,7 @@ func (p *patcher) ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj // Decode into the object to apply the changes fr := NewSingleFrameReader(newJSON, ContentTypeJSON) - dec := newDecoder(p.schemeAndCodec, *defaultDecodeOpts()) - if err := dec.DecodeInto(fr, obj); err != nil { + if err := p.decoder.DecodeInto(fr, obj); err != nil { return err } diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index fbbcdd1f..46b82238 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -3,7 +3,6 @@ package serializer import ( "errors" "fmt" - "sync" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -62,20 +61,14 @@ type Serializer interface { Patcher() Patcher - // Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to - // the "type universe" and advanced conversion/defaulting features - Scheme() *runtime.Scheme + // SchemeLock exposes the underlying 
LockedScheme. + // A Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to + // the "type universe" and advanced conversion/defaulting features. + SchemeLock() LockedScheme - // Codecs provides access to the underlying serializer.CodecFactory, may be used if low-level access - // is needed for encoding and decoding - Codecs() *k8sserializer.CodecFactory -} - -type schemeAndCodec struct { - // scheme is not thread-safe, hence it is guarded by a mutex - scheme *runtime.Scheme - schemeMu *sync.Mutex - codecs *k8sserializer.CodecFactory + // CodecFactory provides access to the underlying CodecFactory, may be used if low-level access + // is needed for encoding and decoding. + CodecFactory() *k8sserializer.CodecFactory } // Encoder is a high-level interface for encoding Kubernetes API Machinery objects and writing them @@ -91,6 +84,12 @@ type Encoder interface { // is not of that version currently it will try to convert. The output bytes are written to the // FrameWriter. The FrameWriter specifies the ContentType. EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv schema.GroupVersion) error + + // SchemeLock exposes the underlying LockedScheme + SchemeLock() LockedScheme + + // CodecFactory exposes the underlying CodecFactory + CodecFactory() *k8sserializer.CodecFactory } // Decoder is a high-level interface for decoding Kubernetes API Machinery objects read from @@ -149,6 +148,9 @@ type Decoder interface { // If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a // *runtime.Unknown object instead of returning a UnrecognizedTypeError. 
DecodeAll(fr FrameReader) ([]runtime.Object, error) + + // SchemeLock exposes the underlying LockedScheme + SchemeLock() LockedScheme } // Converter is an interface that allows access to object conversion capabilities @@ -168,6 +170,9 @@ type Converter interface { // or the sigs.k8s.io/controller-runtime/pkg/conversion.Hub for the given conversion.Convertible object in // the "in" argument. No defaulting is performed. ConvertToHub(in runtime.Object) (runtime.Object, error) + + // SchemeLock exposes the underlying LockedScheme + SchemeLock() LockedScheme } // Defaulter is a high-level interface for accessing defaulting functions in a scheme @@ -183,6 +188,9 @@ type Defaulter interface { // scheme.Default(obj), but with extra logic to cover also internal versions. // Important to note here is that the TypeMeta information is NOT applied automatically. NewDefaultedObject(gvk schema.GroupVersionKind) (runtime.Object, error) + + // SchemeLock exposes the underlying LockedScheme + SchemeLock() LockedScheme } // NewSerializer constructs a new serializer based on a scheme, and optionally a codecfactory @@ -197,45 +205,42 @@ func NewSerializer(scheme *runtime.Scheme, codecs *k8sserializer.CodecFactory) S *codecs = k8sserializer.NewCodecFactory(scheme) } - schemeCodec := &schemeAndCodec{ - scheme: scheme, - schemeMu: &sync.Mutex{}, - codecs: codecs, - } + schemeLock := newLockedScheme(scheme) + return &serializer{ - schemeAndCodec: schemeCodec, - converter: newConverter(scheme), - defaulter: newDefaulter(scheme), - patcher: &patcher{schemeCodec}, + LockedScheme: schemeLock, + converter: NewConverter(schemeLock), + defaulter: NewDefaulter(schemeLock), + patcher: NewPatcher( + NewEncoder(schemeLock, codecs, PrettyEncode(true)), + NewDecoder(schemeLock), + ), } } // serializer implements the Serializer interface type serializer struct { - *schemeAndCodec + LockedScheme + codecs *k8sserializer.CodecFactory converter *converter - defaulter *defaulter - patcher *patcher + 
defaulter Defaulter + patcher Patcher } -// Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to -// the "type universe" and advanced conversion/defaulting features -func (s *serializer) Scheme() *runtime.Scheme { - return s.scheme +func (s *serializer) SchemeLock() LockedScheme { + return s.LockedScheme } -// Codecs provides access to the underlying serializer.CodecFactory, may be used if low-level access -// is needed for encoding and decoding -func (s *serializer) Codecs() *k8sserializer.CodecFactory { +func (s *serializer) CodecFactory() *k8sserializer.CodecFactory { return s.codecs } func (s *serializer) Decoder(opts ...DecodeOption) Decoder { - return newDecoder(s.schemeAndCodec, *defaultDecodeOpts().ApplyOptions(opts)) + return NewDecoder(s.LockedScheme, opts...) } func (s *serializer) Encoder(opts ...EncodeOption) Encoder { - return newEncoder(s.schemeAndCodec, *defaultEncodeOpts().ApplyOptions(opts)) + return NewEncoder(s.LockedScheme, s.codecs, opts...) } func (s *serializer) Converter() Converter { diff --git a/pkg/serializer/utils.go b/pkg/serializer/utils.go index f916a7a5..ed89b6c7 100644 --- a/pkg/serializer/utils.go +++ b/pkg/serializer/utils.go @@ -3,6 +3,7 @@ package serializer import ( "fmt" "strings" + "sync" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -13,6 +14,35 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ) +// LockedScheme describes a shared scheme that should be locked before writing, and unlocked +// after writing. Reading can be done safely without any locking. 
+type LockedScheme interface { + Scheme() *runtime.Scheme + SchemeLock() + SchemeUnlock() +} + +func newLockedScheme(scheme *runtime.Scheme) LockedScheme { + return &lockedScheme{scheme, &sync.Mutex{}} +} + +type lockedScheme struct { + scheme *runtime.Scheme + mu *sync.Mutex +} + +func (s *lockedScheme) Scheme() *runtime.Scheme { + return s.scheme +} + +func (s *lockedScheme) SchemeLock() { + s.mu.Lock() +} + +func (s *lockedScheme) SchemeUnlock() { + s.mu.Unlock() +} + func GVKForObject(scheme *runtime.Scheme, obj runtime.Object) (schema.GroupVersionKind, error) { // Safety check: one should not do this if obj == nil || obj.GetObjectKind() == nil { diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 39d769ec..422a1850 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -10,7 +10,6 @@ import ( "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" ) @@ -33,7 +32,8 @@ var ( type Accessors interface { Storage() storage.Storage NamespaceEnforcer() NamespaceEnforcer - Scheme() *runtime.Scheme + Encoder() serializer.Encoder + Decoder() serializer.Decoder } type WriteAccessors interface { @@ -89,7 +89,8 @@ type StorageVersioner interface { func NewGeneric( storage storage.Storage, - serializer serializer.Serializer, // TODO: only scheme required, encode/decode optional? + encoder serializer.Encoder, + decoder serializer.Decoder, enforcer NamespaceEnforcer, validator Validator, // TODO: optional? versioner StorageVersioner, // TODO: optional? @@ -97,17 +98,21 @@ func NewGeneric( if storage == nil { return nil, fmt.Errorf("storage is mandatory") } - if serializer == nil { // TODO: relax this to scheme, and add encoder/decoder to opts? 
- return nil, fmt.Errorf("serializer is mandatory") + if encoder == nil { + return nil, fmt.Errorf("encoder is mandatory") + } + if decoder == nil { + return nil, fmt.Errorf("decoder is mandatory") } if enforcer == nil { return nil, fmt.Errorf("enforcer is mandatory") } // TODO: validate options return &Generic{ - scheme: serializer.Scheme(), - encoder: serializer.Encoder(), - decoder: serializer.Decoder(), + // It shouldn't matter if we use the encoder's or decoder's SchemeLock + LockedScheme: encoder.SchemeLock(), + encoder: encoder, + decoder: decoder, storage: storage, enforcer: enforcer, @@ -119,9 +124,9 @@ func NewGeneric( var _ Backend = &Generic{} type Generic struct { - scheme *runtime.Scheme - decoder serializer.Decoder + serializer.LockedScheme encoder serializer.Encoder + decoder serializer.Decoder storage storage.Storage enforcer NamespaceEnforcer @@ -129,8 +134,12 @@ type Generic struct { versioner StorageVersioner } -func (b *Generic) Scheme() *runtime.Scheme { - return b.scheme +func (b *Generic) Encoder() serializer.Encoder { + return b.encoder +} + +func (b *Generic) Decoder() serializer.Decoder { + return b.decoder } func (b *Generic) Storage() storage.Storage { @@ -304,7 +313,7 @@ func (b *Generic) Delete(ctx context.Context, obj core.Object) error { // Note: This should also work for unstructured and partial metadata objects func (b *Generic) idForObj(ctx context.Context, obj core.Object) (core.ObjectID, error) { - gvk, err := serializer.GVKForObject(b.scheme, obj) + gvk, err := serializer.GVKForObject(b.Scheme(), obj) if err != nil { return nil, err } diff --git a/pkg/storage/client/client.go b/pkg/storage/client/client.go index 9c216a07..822bd63d 100644 --- a/pkg/storage/client/client.go +++ b/pkg/storage/client/client.go @@ -54,11 +54,11 @@ type Client interface { // NewGeneric constructs a new Generic client // TODO: Construct the default patcher from the given scheme, make patcher an opt instead -func NewGeneric(backend backend.Backend, 
patcher serializer.Patcher) (*Generic, error) { +func NewGeneric(backend backend.Backend) (*Generic, error) { if backend == nil { return nil, fmt.Errorf("backend is mandatory") } - return &Generic{backend, patcher}, nil + return &Generic{backend, serializer.NewPatcher(backend.Encoder(), backend.Decoder())}, nil } // Generic implements the Client interface @@ -92,7 +92,7 @@ func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) // TODO: Create constructors for the different kinds of lists? func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...client.ListOption) error { // This call will verify that list actually is a List type. - gvk, err := serializer.GVKForList(list, c.Backend().Scheme()) + gvk, err := serializer.GVKForList(list, c.Scheme()) if err != nil { return err } @@ -134,7 +134,7 @@ func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...client objs := make([]kruntime.Object, 0, len(allIDs)) // How should the object be created? - createFunc := createObject(gvk, c.Backend().Scheme()) + createFunc := createObject(gvk, c.Scheme()) if serializer.IsPartialObjectList(list) { createFunc = createPartialObject(gvk) } else if serializer.IsUnstructuredList(list) { @@ -222,7 +222,7 @@ func (c *Generic) DeleteAllOf(ctx context.Context, obj core.Object, opts ...clie customDeleteAllOpts := (&DeleteAllOfOptions{}).ApplyOptions(opts) // Get the GVK of the object - gvk, err := serializer.GVKForObject(c.Backend().Scheme(), obj) + gvk, err := serializer.GVKForObject(c.Scheme(), obj) if err != nil { return err } @@ -246,7 +246,7 @@ func (c *Generic) DeleteAllOf(ctx context.Context, obj core.Object, opts ...clie // Scheme returns the scheme this client is using. func (c *Generic) Scheme() *kruntime.Scheme { - return c.backend.Scheme() + return c.Backend().Encoder().SchemeLock().Scheme() } // RESTMapper returns the rest this client is using. For now, this returns nil, so don't use. 
diff --git a/pkg/storage/core/recognizer.go b/pkg/storage/core/recognizer.go index fac0fe12..92dafd03 100644 --- a/pkg/storage/core/recognizer.go +++ b/pkg/storage/core/recognizer.go @@ -9,30 +9,30 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// SerializerObjectRecognizer implements ObjectRecognizer. -var _ ObjectRecognizer = &SerializerObjectRecognizer{} +// KubeObjectRecognizer implements ObjectRecognizer. +var _ ObjectRecognizer = &KubeObjectRecognizer{} -// SerializerObjectRecognizer is a simple implementation of ObjectRecognizer, that +// KubeObjectRecognizer is a simple implementation of ObjectRecognizer, that // decodes the given byte content with the assumption that it is YAML (which covers // both YAML and JSON formats) into a *metav1.PartialObjectMetadata, which allows // extracting the ObjectID from any Kubernetes API Machinery-compatible Object. // // This operation works even though *metav1.PartialObjectMetadata is not registered // with the underlying Scheme in any way. -type SerializerObjectRecognizer struct { - // Serializer is a required field in order for ResolveObjectID to function. - Serializer serializer.Serializer +type KubeObjectRecognizer struct { + // Decoder is a required field in order for ResolveObjectID to function. + Decoder serializer.Decoder // AllowUnrecognized controls whether this implementation allows recognizing // GVK combinations not known to the underlying Scheme. 
Default: false AllowUnrecognized bool } -func (r *SerializerObjectRecognizer) ResolveObjectID(_ context.Context, _ string, content []byte) (ObjectID, error) { - if r.Serializer == nil { - return nil, errors.New("programmer error: SerializerObjectRecognizer.Serializer is nil") +func (r *KubeObjectRecognizer) ResolveObjectID(_ context.Context, _ string, content []byte) (ObjectID, error) { + if r.Decoder == nil { + return nil, errors.New("programmer error: KubeObjectRecognizer.Decoder is nil") } metaObj := &metav1.PartialObjectMetadata{} - err := r.Serializer.Decoder().DecodeInto( + err := r.Decoder.DecodeInto( serializer.NewSingleFrameReader(content, serializer.ContentTypeYAML), metaObj, ) @@ -50,7 +50,7 @@ func (r *SerializerObjectRecognizer) ResolveObjectID(_ context.Context, _ string if metaObj.Kind == "" { return nil, fmt.Errorf(".metadata.name field must not be empty") } - if !r.AllowUnrecognized && !r.Serializer.Scheme().Recognizes(gvk) { + if !r.AllowUnrecognized && !r.Decoder.SchemeLock().Scheme().Recognizes(gvk) { return nil, fmt.Errorf("GroupVersionKind %v not recognized by the scheme", gvk) } From 970642ef8af0e93d0a8a16821c4f1ebcec946101 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 29 Jan 2021 17:35:53 +0200 Subject: [PATCH 093/149] Create and use a sample StorageVersioner. 
--- cmd/sample-app/main.go | 7 ++++++- cmd/sample-gitops/main.go | 5 ++++- cmd/sample-watch/main.go | 5 ++++- pkg/serializer/defaulter.go | 2 +- pkg/serializer/encode.go | 2 +- pkg/serializer/options.go | 2 -- pkg/serializer/serializer.go | 11 ----------- pkg/serializer/utils.go | 12 ++++++++++++ pkg/storage/backend/backend.go | 19 ++++++++----------- pkg/storage/backend/versioner.go | 31 +++++++++++++++++++++++++++++++ 10 files changed, 67 insertions(+), 29 deletions(-) create mode 100644 pkg/storage/backend/versioner.go diff --git a/cmd/sample-app/main.go b/cmd/sample-app/main.go index f915d941..0e9eb076 100644 --- a/cmd/sample-app/main.go +++ b/cmd/sample-app/main.go @@ -63,9 +63,14 @@ func run(manifestDir string) error { return err } + // Just use default encoders and decoders encoder := scheme.Serializer.Encoder() decoder := scheme.Serializer.Decoder() - b, err := backend.NewGeneric(s, encoder, decoder, kube.NewNamespaceEnforcer(), nil, nil) + + // Use the version information in the scheme to determine the storage version + versioner := backend.SchemePreferredVersioner{Scheme: scheme.Scheme} + + b, err := backend.NewGeneric(s, encoder, decoder, kube.NewNamespaceEnforcer(), versioner, nil) if err != nil { return err } diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go index 45820349..37dbe6ca 100644 --- a/cmd/sample-gitops/main.go +++ b/cmd/sample-gitops/main.go @@ -166,7 +166,10 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str defer func() { _ = rawManifest.Close() }() - b, err := backend.NewGeneric(rawManifest, encoder, decoder, kube.NewNamespaceEnforcer(), nil, nil) + // Use the version information in the scheme to determine the storage version + versioner := backend.SchemePreferredVersioner{Scheme: scheme.Scheme} + + b, err := backend.NewGeneric(rawManifest, encoder, decoder, kube.NewNamespaceEnforcer(), versioner, nil) if err != nil { return err } diff --git a/cmd/sample-watch/main.go 
b/cmd/sample-watch/main.go index 11f5c553..3d03e8fd 100644 --- a/cmd/sample-watch/main.go +++ b/cmd/sample-watch/main.go @@ -69,7 +69,10 @@ func run(watchDir string) error { return err } - b, err := backend.NewGeneric(rawManifest, encoder, decoder, kube.NewNamespaceEnforcer(), nil, nil) + // Use the version information in the scheme to determine the storage version + versioner := backend.SchemePreferredVersioner{Scheme: scheme.Scheme} + + b, err := backend.NewGeneric(rawManifest, encoder, decoder, kube.NewNamespaceEnforcer(), versioner, nil) if err != nil { return err } diff --git a/pkg/serializer/defaulter.go b/pkg/serializer/defaulter.go index d0b117f3..e94093cd 100644 --- a/pkg/serializer/defaulter.go +++ b/pkg/serializer/defaulter.go @@ -60,7 +60,7 @@ func (d *defaulter) runDefaulting(obj runtime.Object) error { // We know that the current object is internal // Get the preferred external version... - gv, err := prioritizedVersionForGroup(d.Scheme(), gvk.Group) + gv, err := PreferredVersionForGroup(d.Scheme(), gvk.Group) if err != nil { return err } diff --git a/pkg/serializer/encode.go b/pkg/serializer/encode.go index ae46760b..ace63061 100644 --- a/pkg/serializer/encode.go +++ b/pkg/serializer/encode.go @@ -49,7 +49,7 @@ func (e *encoder) Encode(fw FrameWriter, objs ...runtime.Object) error { // If the object is internal, convert it to the preferred external one if gvk.Version == runtime.APIVersionInternal { - gv, err := prioritizedVersionForGroup(e.Scheme(), gvk.Group) + gv, err := PreferredVersionForGroup(e.Scheme(), gvk.Group) if err != nil { return err } diff --git a/pkg/serializer/options.go b/pkg/serializer/options.go index e5736e62..e4d8fe4a 100644 --- a/pkg/serializer/options.go +++ b/pkg/serializer/options.go @@ -4,8 +4,6 @@ import ( "k8s.io/utils/pointer" ) -// TODO: Import k8s.io/utils/pointer instead of baking our own ptrutils package. 
- type EncodeOption interface { ApplyToEncode(*EncodeOptions) } diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index 46b82238..edb6f38c 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -2,7 +2,6 @@ package serializer import ( "errors" - "fmt" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -254,13 +253,3 @@ func (s *serializer) Defaulter() Defaulter { func (s *serializer) Patcher() Patcher { return s.patcher } - -func prioritizedVersionForGroup(scheme *runtime.Scheme, groupName string) (schema.GroupVersion, error) { - // Get the prioritized versions for the given group - gvs := scheme.PrioritizedVersionsForGroup(groupName) - if len(gvs) < 1 { - return schema.GroupVersion{}, fmt.Errorf("expected some version to be registered for group %s", groupName) - } - // Use the first, preferred, (external) version - return gvs[0], nil -} diff --git a/pkg/serializer/utils.go b/pkg/serializer/utils.go index ed89b6c7..88ce6d82 100644 --- a/pkg/serializer/utils.go +++ b/pkg/serializer/utils.go @@ -98,6 +98,18 @@ func GVKForList(obj client.ObjectList, scheme *runtime.Scheme) (schema.GroupVers return gvk, nil } +// PreferredVersionForGroup returns the most preferred version of a group in the scheme. +// In order to tell the scheme what your preferred ordering is, use scheme.SetVersionPriority(). +func PreferredVersionForGroup(scheme *runtime.Scheme, groupName string) (schema.GroupVersion, error) { + // Get the prioritized versions for the given group + gvs := scheme.PrioritizedVersionsForGroup(groupName) + if len(gvs) < 1 { + return schema.GroupVersion{}, fmt.Errorf("expected some version to be registered for group %s", groupName) + } + // Use the first, preferred, (external) version + return gvs[0], nil +} + // EqualsGK returns true if gk1 and gk2 have the same fields. 
func EqualsGK(gk1, gk2 schema.GroupKind) bool { return gk1.Group == gk2.Group && gk1.Kind == gk2.Kind diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 422a1850..20032447 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -82,18 +82,13 @@ type Validator interface { ValidateChange(ctx context.Context, backend Reader, op ChangeOperation, obj core.Object) error } -type StorageVersioner interface { - // TODO: Do we need the context here? - StorageVersion(ctx context.Context, id core.ObjectID) (core.GroupVersion, error) -} - func NewGeneric( storage storage.Storage, encoder serializer.Encoder, decoder serializer.Decoder, enforcer NamespaceEnforcer, + versioner StorageVersioner, validator Validator, // TODO: optional? - versioner StorageVersioner, // TODO: optional? ) (*Generic, error) { if storage == nil { return nil, fmt.Errorf("storage is mandatory") @@ -107,6 +102,9 @@ func NewGeneric( if enforcer == nil { return nil, fmt.Errorf("enforcer is mandatory") } + if versioner == nil { + return nil, fmt.Errorf("versioner is mandatory") + } // TODO: validate options return &Generic{ // It shouldn't matter if we use the encoder's or decoder's SchemeLock @@ -259,17 +257,16 @@ func (b *Generic) UpdateStatus(ctx context.Context, obj core.Object) error { } func (b *Generic) write(ctx context.Context, id core.ObjectID, obj core.Object) error { - // TODO: Figure out how to get ContentType before the object actually exists! 
+ // Get the content type of the object ct, err := b.storage.ContentType(ctx, id) if err != nil { return err } // Resolve the desired storage version - /* TODO: re-enable later - gv, err := b.versioner.StorageVersion(ctx, id) + gv, err := b.versioner.StorageVersion(id) if err != nil { return err - }*/ + } // Set creationTimestamp if not already populated t := obj.GetCreationTimestamp() @@ -279,7 +276,7 @@ func (b *Generic) write(ctx context.Context, id core.ObjectID, obj core.Object) var objBytes bytes.Buffer // TODO: Work with any ContentType, not just JSON/YAML. Or, make a SingleFrameWriter for any ct. - err = b.encoder.Encode(serializer.NewFrameWriter(ct, &objBytes), obj) + err = b.encoder.EncodeForGroupVersion(serializer.NewFrameWriter(ct, &objBytes), obj, gv) if err != nil { return err } diff --git a/pkg/storage/backend/versioner.go b/pkg/storage/backend/versioner.go new file mode 100644 index 00000000..93b18934 --- /dev/null +++ b/pkg/storage/backend/versioner.go @@ -0,0 +1,31 @@ +package backend + +import ( + "fmt" + + "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage/core" + "k8s.io/apimachinery/pkg/runtime" +) + +// StorageVersioner is an interface that determines what version the Object +// with the given ID should be serialized as. +type StorageVersioner interface { + StorageVersion(id core.ObjectID) (core.GroupVersion, error) +} + +// SchemePreferredVersioner uses the prioritization information in the runtime.Scheme to +// determine what the preferred version should be. The caller is responsible for +// registering this information with the scheme using scheme.SetVersionPriority() before +// using this StorageVersioner. If SetVersionPriority has not been run, the version returned +// completely arbitrary. 
+type SchemePreferredVersioner struct { + Scheme *runtime.Scheme +} + +func (v SchemePreferredVersioner) StorageVersion(id core.ObjectID) (core.GroupVersion, error) { + if v.Scheme == nil { + return core.GroupVersion{}, fmt.Errorf("programmer error: SchemePreferredVersioner.Scheme must not be nil") + } + return serializer.PreferredVersionForGroup(v.Scheme, id.GroupKind().Group) +} From 33809d810a9d1cd07e16743f39f14c1c9f88e55f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 29 Jan 2021 17:43:06 +0200 Subject: [PATCH 094/149] Fix some comments. --- pkg/storage/backend/backend.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 20032447..18a934b6 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -82,13 +82,19 @@ type Validator interface { ValidateChange(ctx context.Context, backend Reader, op ChangeOperation, obj core.Object) error } +// NewGeneric creates a new generic Backend for the given underlying Storage for storing the +// objects once serialized, encoders and decoders for (de)serialization, the NamespaceEnforcer +// for enforcing a namespacing policy, the StorageVersioner for telling the encoder what version +// of many to use when encoding, and optionally, a Validator. +// +// All parameters except the validator are mandatory. func NewGeneric( storage storage.Storage, encoder serializer.Encoder, decoder serializer.Decoder, enforcer NamespaceEnforcer, versioner StorageVersioner, - validator Validator, // TODO: optional? 
+ validator Validator, ) (*Generic, error) { if storage == nil { return nil, fmt.Errorf("storage is mandatory") @@ -105,7 +111,6 @@ func NewGeneric( if versioner == nil { return nil, fmt.Errorf("versioner is mandatory") } - // TODO: validate options return &Generic{ // It shouldn't matter if we use the encoder's or decoder's SchemeLock LockedScheme: encoder.SchemeLock(), @@ -173,8 +178,7 @@ func (b *Generic) Get(ctx context.Context, obj core.Object) error { return err } - // TODO: Support various decoding options, e.g. defaulting? - // TODO: Does this "replace" already-set fields? + // TODO: Check if the decoder "replaces" already-set fields or "leaks" old data? return b.decoder.DecodeInto(serializer.NewSingleFrameReader(content, ct), obj) } @@ -275,7 +279,8 @@ func (b *Generic) write(ctx context.Context, id core.ObjectID, obj core.Object) } var objBytes bytes.Buffer - // TODO: Work with any ContentType, not just JSON/YAML. Or, make a SingleFrameWriter for any ct. + // TODO: Work with any ContentType, not just JSON/YAML. 
Make a SingleFrameWriter + // that works for any ContentType, and just ever writes one doc (which is what we need) err = b.encoder.EncodeForGroupVersion(serializer.NewFrameWriter(ct, &objBytes), obj, gv) if err != nil { return err From a60636f4c6970aa64eb3fe74c61e439502388627 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 1 Feb 2021 21:59:45 +0200 Subject: [PATCH 095/149] Add more comments --- cmd/sample-gitops/main.go | 20 ++++++++++++------- pkg/storage/client/transactional/client.go | 9 +++++++-- .../transactional/distributed/git/git.go | 2 ++ pkg/storage/client/transactional/handlers.go | 19 +++++++++++++++++- .../client/transactional/interfaces.go | 11 ++++++++++ 5 files changed, 51 insertions(+), 10 deletions(-) diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go index 37dbe6ca..e787ba37 100644 --- a/cmd/sample-gitops/main.go +++ b/cmd/sample-gitops/main.go @@ -184,6 +184,7 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str return err } + // Note: This will add itself to the Commit/TxHook chains on the localClone. txClient, err := distributed.NewClient(txGeneralClient, localClone) if err != nil { return err @@ -235,22 +236,27 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str return echo.NewHTTPError(http.StatusBadRequest, "Please set name") } + // Create an empty typed object, the data from the client will be written into it + // at .Get-time below. car := v1alpha1.Car{} carKey := core.ObjectKey{Name: name} - + // Specify what our "base" branch is in the context; make it match the main branch + // of the Git clone. branchCtx := core.WithVersionRef(ctx, core.NewBranchRef(localClone.MainBranch())) - + // Our head branch is the name of the Car, and it ends in a "-", which makes the + // TxClient add a random sha suffix. headBranch := fmt.Sprintf("%s-update-", name) + err := txClient. - BranchTransaction(branchCtx, headBranch). - Get(carKey, &car). 
- Custom(func(ctx context.Context) error { + BranchTransaction(branchCtx, headBranch). // Start a transaction of the base branch to the head + Get(carKey, &car). // Load the latest data of the Car into &car. + Custom(func(ctx context.Context) error { // Mutate (update) status of the Car car.Status.Distance = rand.Uint64() car.Status.Speed = rand.Float64() * 100 return nil }). - Update(&car). - CreateTx(githubpr.GenericPullRequest{ + Update(&car). // Store the changed car in the Storage + CreateTx(githubpr.GenericPullRequest{ // Create a commit for the tx; return the super-set PR commit Commit: transactional.GenericCommit{ Author: transactional.GenericCommitAuthor{ Name: authorName, diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index 1108c1d7..e9b034e7 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -37,7 +37,9 @@ func NewGeneric(c client.Client, manager BranchManager, merger BranchMerger) (Cl type Generic struct { c client.Client - txs map[string]*txLock + // txs maps branches to their tx locks + txs map[string]*txLock + // txsMu guards reads and writes of txs txsMu *sync.Mutex // +optional @@ -47,7 +49,10 @@ type Generic struct { } type txLock struct { - mu *sync.RWMutex + // mu is locked for writing while the transaction is executing, and locked + // for reading, while a read operation is active. + mu *sync.RWMutex + // mode specifies what transaction mode is used; Atomic or AllowReading. 
mode TxMode // active == 1 means "transaction active, mu is locked for writing" // active == 0 means "transaction has stopped, mu has been unlocked" diff --git a/pkg/storage/client/transactional/distributed/git/git.go b/pkg/storage/client/transactional/distributed/git/git.go index 53cf157f..6fb14d83 100644 --- a/pkg/storage/client/transactional/distributed/git/git.go +++ b/pkg/storage/client/transactional/distributed/git/git.go @@ -307,6 +307,8 @@ func (d *LocalClone) ResetToCleanBranch(_ context.Context, branch string) error Dir: true, }) // Force-checkout the main branch + // TODO: If a transaction (non-branched) was able to commit, and failed after that + // we need to roll back that commit. return d.wt.Checkout(&git.CheckoutOptions{ Branch: plumbing.NewBranchReferenceName(branch), Force: true, diff --git a/pkg/storage/client/transactional/handlers.go b/pkg/storage/client/transactional/handlers.go index aa438e3f..0b38adea 100644 --- a/pkg/storage/client/transactional/handlers.go +++ b/pkg/storage/client/transactional/handlers.go @@ -15,8 +15,18 @@ type CommitHookChain interface { Register(CommitHook) } +// CommitHook executes directly before and after a commit is being made. +// If the transaction fails before a commit could happen, these will never +// be run. type CommitHook interface { + // PreCommitHook executes arbitrary logic for the given transaction info + // and commit info; if an error is returned, the commit won't happen. PreCommitHook(ctx context.Context, commit Commit, info TxInfo) error + // PostCommitHook executes arbitrary logic for the given transaction info + // and commit info; if an error is returned, the commit will happen in the + // case of a BranchTx on the head branch; but the transaction itself will + // fail. In the case of a "normal" transaction; the commit will be made, + // but later rolled back. 
PostCommitHook(ctx context.Context, commit Commit, info TxInfo) error } @@ -58,12 +68,19 @@ func (m *MultiCommitHook) PostCommitHook(ctx context.Context, commit Commit, inf type TransactionHookChain interface { // The chain also itself implements TransactionHook TransactionHook - // Register registers a new CommitHook to the chain + // Register registers a new TransactionHook to the chain Register(TransactionHook) } +// TransactionHook provides a way to extend transaction behavior. Regardless +// of the result of the transaction; these will always be run. type TransactionHook interface { + // PreTransactionHook executes before CreateBranch has been called for the + // BranchManager in BranchTx mode; and in any case before any user-tx-specific + // code starts executing. PreTransactionHook(ctx context.Context, info TxInfo) error + // PostTransactionHook executes when a transaction is terminated, either due + // to an Abort() or a successful Commit() or CreateTx(). PostTransactionHook(ctx context.Context, info TxInfo) error } diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go index 7371f4c3..cea5b00c 100644 --- a/pkg/storage/client/transactional/interfaces.go +++ b/pkg/storage/client/transactional/interfaces.go @@ -11,15 +11,26 @@ type Client interface { client.Reader BranchManager() BranchManager + // BranchMerger is optional. BranchMerger() BranchMerger + // Transaction creates a new transaction on the branch stored in the context, so that + // no other writes to that branch can take place meanwhile. Transaction(ctx context.Context, opts ...TxOption) Tx + // BranchTransaction creates a new "head" branch with the given {branchName} name, based + // on the "base" branch in the context. The "base" branch is not locked for writing while + // the transaction is running, but the head branch is. 
BranchTransaction(ctx context.Context, branchName string, opts ...TxOption) BranchTx } type BranchManager interface { + // CreateBranch creates a new branch with the given target branch name. It forks out + // of the branch specified in the context. CreateBranch(ctx context.Context, branch string) error + // ResetToCleanBranch switches back to the given branch; but first discards all non-committed + // changes. ResetToCleanBranch(ctx context.Context, branch string) error + // Commit creates a new commit for the branch stored in the context. Commit(ctx context.Context, commit Commit) error // CommitHookChain must be non-nil, but can be a no-op From 90ac7b53e7386519d5c6f0c3103be698ed1c539d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 1 Feb 2021 22:00:24 +0200 Subject: [PATCH 096/149] Remove unnecessary "Commons" --- pkg/storage/event/interfaces.go | 19 +++++++------------ .../filesystem/fileevents/interfaces.go | 17 +++++------------ .../filesystem/unstructured/event/storage.go | 17 ++++++++--------- 3 files changed, 20 insertions(+), 33 deletions(-) diff --git a/pkg/storage/event/interfaces.go b/pkg/storage/event/interfaces.go index b13c1860..7d2a1e69 100644 --- a/pkg/storage/event/interfaces.go +++ b/pkg/storage/event/interfaces.go @@ -7,9 +7,13 @@ import ( "github.com/weaveworks/libgitops/pkg/storage" ) -// StorageCommon contains the methods that EventStorage adds to the -// to the normal Storage. -type StorageCommon interface { +// EventStorage is the abstract combination of a normal Storage, and +// a possiblility to listen for changes to objects as they change. +// TODO: Maybe we could use some of controller-runtime's built-in functionality +// for watching for changes? +type Storage interface { + storage.Storage + // WatchForObjectEvents starts feeding ObjectEvents into the given "into" // channel. The caller is responsible for setting a channel buffering // limit large enough to not block normal operation. 
An error might @@ -20,12 +24,3 @@ type StorageCommon interface { // Close closes the EventStorage and underlying resources gracefully. io.Closer } - -// EventStorage is the abstract combination of a normal Storage, and -// a possiblility to listen for changes to objects as they change. -// TODO: Maybe we could use some of controller-runtime's built-in functionality -// for watching for changes? -type EventStorage interface { - storage.Storage - StorageCommon -} diff --git a/pkg/storage/filesystem/fileevents/interfaces.go b/pkg/storage/filesystem/fileevents/interfaces.go index 77d7708e..5a72e97d 100644 --- a/pkg/storage/filesystem/fileevents/interfaces.go +++ b/pkg/storage/filesystem/fileevents/interfaces.go @@ -39,19 +39,12 @@ type Emitter interface { io.Closer } -// StorageCommon is an extension to event.StorageCommon that -// also contains an underlying Emitter. This is meant to be -// used in tandem with filesystem.Storages. -type StorageCommon interface { - event.StorageCommon +// Storage is the union of a filesystem.Storage, and event.Storage, +// and the possibility to listen for object updates from a Emitter. +type Storage interface { + filesystem.Storage + event.Storage // FileEventsEmitter gets the Emitter used internally. FileEventsEmitter() Emitter } - -// FilesystemEventStorage is the combination of a filesystem.Storage, -// and the possibility to listen for object updates from a Emitter. 
-type FilesystemEventStorage interface { - filesystem.Storage - StorageCommon -} diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index 0d674b52..5bab42f8 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -16,28 +16,27 @@ import ( "github.com/weaveworks/libgitops/pkg/util/sync" ) -// UnstructuredEventStorage is an extension of raw.UnstructuredStorage, that -// adds the possiblility to listen for object updates from a FileEventsEmitter. +// Storage is a union of unstructured.Storage and fileevents.Storage. // // When the Sync() function is run; the ObjectEvents that are emitted to the // listening channels with have ObjectEvent.Type == ObjectEventSync. -type UnstructuredEventStorage interface { +type Storage interface { unstructured.Storage - fileevents.StorageCommon + fileevents.Storage } const defaultEventsBufferSize = 4096 // NewManifest is a high-level constructor for a generic // MappedFileFinder and filesystem.Storage, together with a -// inotify FileWatcher; all combined into an UnstructuredEventStorage. +// inotify FileWatcher; all combined into an unstructuredevent.Storage. func NewManifest( dir string, contentTyper filesystem.ContentTyper, namespacer core.Namespacer, recognizer core.ObjectRecognizer, pathExcluder filesystem.PathExcluder, -) (UnstructuredEventStorage, error) { +) (Storage, error) { fs := filesystem.NewOSFilesystem(dir) fileFinder := unstructured.NewGenericMappedFileFinder(contentTyper, fs) fsRaw, err := filesystem.NewGeneric(fileFinder, namespacer) @@ -72,7 +71,7 @@ func NewGeneric( s unstructured.Storage, emitter fileevents.Emitter, opts GenericStorageOptions, -) (UnstructuredEventStorage, error) { +) (Storage, error) { return &Generic{ Storage: s, emitter: emitter, @@ -96,8 +95,8 @@ type GenericStorageOptions struct { SyncAtStart bool } -// Generic implements UnstructuredEventStorage. 
-var _ UnstructuredEventStorage = &Generic{} +// Generic implements unstructuredevent.Storage. +var _ Storage = &Generic{} // Generic is an extended raw.Storage implementation, which provides a watcher // for watching changes in the directory managed by the embedded Storage's RawStorage. From d509be555ee62db90495c3be6f09313207a2a8b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 1 Feb 2021 22:05:40 +0200 Subject: [PATCH 097/149] Move Namespacer to pkg/storage. --- cmd/sample-app/main.go | 3 ++- cmd/sample-gitops/main.go | 3 ++- cmd/sample-watch/main.go | 3 ++- pkg/storage/backend/enforcer.go | 4 ++-- pkg/storage/core/interfaces.go | 9 --------- pkg/storage/filesystem/filefinder_simple.go | 3 ++- pkg/storage/filesystem/storage.go | 6 +++--- pkg/storage/filesystem/unstructured/event/storage.go | 2 +- pkg/storage/interfaces.go | 11 ++++++++++- pkg/storage/kube/namespaces.go | 5 +++-- pkg/storage/{core => }/namespaces.go | 10 +++++----- pkg/storage/utils.go | 2 +- 12 files changed, 33 insertions(+), 28 deletions(-) rename pkg/storage/{core => }/namespaces.go (76%) diff --git a/cmd/sample-app/main.go b/cmd/sample-app/main.go index 0e9eb076..957e9ca1 100644 --- a/cmd/sample-app/main.go +++ b/cmd/sample-app/main.go @@ -17,6 +17,7 @@ import ( "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/backend" "github.com/weaveworks/libgitops/pkg/storage/client" "github.com/weaveworks/libgitops/pkg/storage/core" @@ -53,7 +54,7 @@ func run(manifestDir string) error { s, err := filesystem.NewSimpleStorage( manifestDir, - core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, + storage.StaticNamespacer{NamespacedIsDefaultPolicy: false}, filesystem.SimpleFileFinderOptions{ DisableGroupDirectory: true, ContentType: 
serializer.ContentTypeYAML, diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go index e787ba37..04ae964f 100644 --- a/cmd/sample-gitops/main.go +++ b/cmd/sample-gitops/main.go @@ -21,6 +21,7 @@ import ( "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/backend" "github.com/weaveworks/libgitops/pkg/storage/client" "github.com/weaveworks/libgitops/pkg/storage/client/transactional" @@ -150,7 +151,7 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str rawManifest, err := unstructuredevent.NewManifest( localClone.Dir(), filesystem.DefaultContentTyper, - core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced + storage.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced &core.KubeObjectRecognizer{Decoder: decoder}, filesystem.DefaultPathExcluders(), ) diff --git a/cmd/sample-watch/main.go b/cmd/sample-watch/main.go index 3d03e8fd..615d880f 100644 --- a/cmd/sample-watch/main.go +++ b/cmd/sample-watch/main.go @@ -15,6 +15,7 @@ import ( "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/backend" "github.com/weaveworks/libgitops/pkg/storage/client" "github.com/weaveworks/libgitops/pkg/storage/core" @@ -55,7 +56,7 @@ func run(watchDir string) error { rawManifest, err := unstructuredevent.NewManifest( watchDir, filesystem.DefaultContentTyper, - core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced + storage.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced 
&core.KubeObjectRecognizer{Decoder: decoder}, filesystem.DefaultPathExcluders(), ) diff --git a/pkg/storage/backend/enforcer.go b/pkg/storage/backend/enforcer.go index 8553283e..c4c7646c 100644 --- a/pkg/storage/backend/enforcer.go +++ b/pkg/storage/backend/enforcer.go @@ -30,7 +30,7 @@ type NamespaceEnforcer interface { // // See GenericNamespaceEnforcer for an example implementation, or // pkg/storage/kube.NewNamespaceEnforcer() for a sample application. - EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer core.Namespacer, lister storage.Lister) error + EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer storage.Namespacer, lister storage.Lister) error } // GenericNamespaceEnforcer is a NamespaceEnforcer that: @@ -61,7 +61,7 @@ type GenericNamespaceEnforcer struct { NamespaceGroupKind *core.GroupKind } -func (e GenericNamespaceEnforcer) EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer core.Namespacer, lister storage.Lister) error { +func (e GenericNamespaceEnforcer) EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer storage.Namespacer, lister storage.Lister) error { // Get namespacing info namespaced, err := namespacer.IsNamespaced(gvk.GroupKind()) if err != nil { diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go index b25cec3a..c4b864be 100644 --- a/pkg/storage/core/interfaces.go +++ b/pkg/storage/core/interfaces.go @@ -35,15 +35,6 @@ type DeleteAllOfOption = client.DeleteAllOfOption // Helper functions from client. var ObjectKeyFromObject = client.ObjectKeyFromObject -// Namespacer is an interface that lets the caller know if a GroupKind is namespaced -// or not. There are two ready-made implementations: -// 1. RESTMapperToNamespacer -// 2. 
NewStaticNamespacer -type Namespacer interface { - // IsNamespaced returns true if the GroupKind is a namespaced type - IsNamespaced(gk schema.GroupKind) (bool, error) -} - // TODO: Investigate if the ObjectRecognizer should return unversioned // or versioned ObjectID's type ObjectRecognizer interface { diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go index e0e6940e..5317bc28 100644 --- a/pkg/storage/filesystem/filefinder_simple.go +++ b/pkg/storage/filesystem/filefinder_simple.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" "k8s.io/apimachinery/pkg/util/sets" ) @@ -17,7 +18,7 @@ import ( // using SimpleFileFinder as the FileFinder, and the local disk as target. // If you need more advanced customizablility than provided here, you can compose // the call to filesystem.NewGeneric yourself. -func NewSimpleStorage(dir string, namespacer core.Namespacer, opts SimpleFileFinderOptions) (Storage, error) { +func NewSimpleStorage(dir string, namespacer storage.Namespacer, opts SimpleFileFinderOptions) (Storage, error) { fs := NewOSFilesystem(dir) fileFinder, err := NewSimpleFileFinder(fs, opts) if err != nil { diff --git a/pkg/storage/filesystem/storage.go b/pkg/storage/filesystem/storage.go index f3bc2870..0d1d06e5 100644 --- a/pkg/storage/filesystem/storage.go +++ b/pkg/storage/filesystem/storage.go @@ -14,7 +14,7 @@ import ( // NewGeneric creates a new Generic using the given lower-level // FileFinder and Namespacer. 
-func NewGeneric(fileFinder FileFinder, namespacer core.Namespacer) (Storage, error) { +func NewGeneric(fileFinder FileFinder, namespacer storage.Namespacer) (Storage, error) { if fileFinder == nil { return nil, fmt.Errorf("NewGeneric: fileFinder is mandatory") } @@ -33,10 +33,10 @@ func NewGeneric(fileFinder FileFinder, namespacer core.Namespacer) (Storage, err // in a generic manner. type Generic struct { fileFinder FileFinder - namespacer core.Namespacer + namespacer storage.Namespacer } -func (r *Generic) Namespacer() core.Namespacer { +func (r *Generic) Namespacer() storage.Namespacer { return r.namespacer } diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index 5bab42f8..f7dba01d 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -33,7 +33,7 @@ const defaultEventsBufferSize = 4096 func NewManifest( dir string, contentTyper filesystem.ContentTyper, - namespacer core.Namespacer, + namespacer storage.Namespacer, recognizer core.ObjectRecognizer, pathExcluder filesystem.PathExcluder, ) (Storage, error) { diff --git a/pkg/storage/interfaces.go b/pkg/storage/interfaces.go index c5698e01..ec0d041b 100644 --- a/pkg/storage/interfaces.go +++ b/pkg/storage/interfaces.go @@ -35,11 +35,20 @@ type Storage interface { // by Reader and Writer. type StorageCommon interface { // Namespacer gives access to the namespacer that is used - Namespacer() core.Namespacer + Namespacer() Namespacer // Exists checks if the resource indicated by the ID exists. Exists(ctx context.Context, id core.UnversionedObjectID) bool } +// Namespacer is an interface that lets the caller know if a GroupKind is namespaced +// or not. There are two ready-made implementations: +// 1. kube.RESTMapperToNamespacer +// 2. 
NewStaticNamespacer +type Namespacer interface { + // IsNamespaced returns true if the GroupKind is a namespaced type + IsNamespaced(gk core.GroupKind) (bool, error) +} + // Reader provides the read operations for the Storage. type Reader interface { StorageCommon diff --git a/pkg/storage/kube/namespaces.go b/pkg/storage/kube/namespaces.go index 3e509ceb..c32cf77e 100644 --- a/pkg/storage/kube/namespaces.go +++ b/pkg/storage/kube/namespaces.go @@ -3,6 +3,7 @@ package kube import ( "sync" + "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/backend" "github.com/weaveworks/libgitops/pkg/storage/core" "k8s.io/apimachinery/pkg/api/meta" @@ -61,7 +62,7 @@ type SimpleRESTMapper interface { // k8s.io/client-go/restmapper.NewDiscoveryRESTMapper(groups []*restmapper.APIGroupResources) // in order to look up namespacing information from either a running API server, or statically, from // the list of restmapper.APIGroupResources. -func RESTMapperToNamespacer(mapper SimpleRESTMapper) core.Namespacer { +func RESTMapperToNamespacer(mapper SimpleRESTMapper) storage.Namespacer { return &restNamespacer{ mapper: mapper, mappingByType: make(map[schema.GroupKind]*meta.RESTMapping), @@ -69,7 +70,7 @@ func RESTMapperToNamespacer(mapper SimpleRESTMapper) core.Namespacer { } } -var _ core.Namespacer = &restNamespacer{} +var _ storage.Namespacer = &restNamespacer{} type restNamespacer struct { mapper SimpleRESTMapper diff --git a/pkg/storage/core/namespaces.go b/pkg/storage/namespaces.go similarity index 76% rename from pkg/storage/core/namespaces.go rename to pkg/storage/namespaces.go index d0929f56..d6df9cd3 100644 --- a/pkg/storage/core/namespaces.go +++ b/pkg/storage/namespaces.go @@ -1,7 +1,7 @@ -package core +package storage import ( - "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/weaveworks/libgitops/pkg/storage/core" ) // StaticNamespacer implements Namespacer @@ -15,10 +15,10 @@ var _ Namespacer = StaticNamespacer{} // policy is 
reversed. type StaticNamespacer struct { NamespacedIsDefaultPolicy bool - Exceptions []schema.GroupKind + Exceptions []core.GroupKind } -func (n StaticNamespacer) IsNamespaced(gk schema.GroupKind) (bool, error) { +func (n StaticNamespacer) IsNamespaced(gk core.GroupKind) (bool, error) { if n.NamespacedIsDefaultPolicy { // namespace by default, the gks list is a list of root-scoped entities return !n.gkIsException(gk), nil @@ -27,7 +27,7 @@ func (n StaticNamespacer) IsNamespaced(gk schema.GroupKind) (bool, error) { return n.gkIsException(gk), nil } -func (n StaticNamespacer) gkIsException(target schema.GroupKind) bool { +func (n StaticNamespacer) gkIsException(target core.GroupKind) bool { for _, gk := range n.Exceptions { if gk == target { return true diff --git a/pkg/storage/utils.go b/pkg/storage/utils.go index d45323bd..799f2dc6 100644 --- a/pkg/storage/utils.go +++ b/pkg/storage/utils.go @@ -8,7 +8,7 @@ import ( // VerifyNamespaced verifies that the given GroupKind and namespace parameter follows // the rule of the Namespacer. -func VerifyNamespaced(namespacer core.Namespacer, gk core.GroupKind, ns string) error { +func VerifyNamespaced(namespacer Namespacer, gk core.GroupKind, ns string) error { // Get namespacing info namespaced, err := namespacer.IsNamespaced(gk) if err != nil { From ef8d5040801980187c6beb080d95c91669264100 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 1 Feb 2021 22:11:03 +0200 Subject: [PATCH 098/149] Move ObjectRecognizer to the unstructured package instead. 
--- cmd/sample-gitops/main.go | 3 ++- cmd/sample-watch/main.go | 3 ++- pkg/storage/core/interfaces.go | 8 -------- pkg/storage/filesystem/unstructured/event/storage.go | 2 +- pkg/storage/filesystem/unstructured/interfaces.go | 8 +++++++- .../{core => filesystem/unstructured}/recognizer.go | 9 +++++---- pkg/storage/filesystem/unstructured/storage.go | 7 +++---- 7 files changed, 20 insertions(+), 20 deletions(-) rename pkg/storage/{core => filesystem/unstructured}/recognizer.go (84%) diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go index 04ae964f..65d1e7e6 100644 --- a/cmd/sample-gitops/main.go +++ b/cmd/sample-gitops/main.go @@ -31,6 +31,7 @@ import ( "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/event" "github.com/weaveworks/libgitops/pkg/storage/filesystem" + unstructuredfs "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured" unstructuredevent "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/event" "github.com/weaveworks/libgitops/pkg/storage/kube" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -152,7 +153,7 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str localClone.Dir(), filesystem.DefaultContentTyper, storage.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced - &core.KubeObjectRecognizer{Decoder: decoder}, + unstructuredfs.KubeObjectRecognizer{Decoder: decoder}, filesystem.DefaultPathExcluders(), ) if err != nil { diff --git a/cmd/sample-watch/main.go b/cmd/sample-watch/main.go index 615d880f..474876f8 100644 --- a/cmd/sample-watch/main.go +++ b/cmd/sample-watch/main.go @@ -21,6 +21,7 @@ import ( "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/event" "github.com/weaveworks/libgitops/pkg/storage/filesystem" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured" unstructuredevent 
"github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/event" "github.com/weaveworks/libgitops/pkg/storage/kube" ) @@ -57,7 +58,7 @@ func run(watchDir string) error { watchDir, filesystem.DefaultContentTyper, storage.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced - &core.KubeObjectRecognizer{Decoder: decoder}, + unstructured.KubeObjectRecognizer{Decoder: decoder}, filesystem.DefaultPathExcluders(), ) if err != nil { diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go index c4b864be..cbe1b21d 100644 --- a/pkg/storage/core/interfaces.go +++ b/pkg/storage/core/interfaces.go @@ -1,8 +1,6 @@ package core import ( - "context" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" @@ -35,12 +33,6 @@ type DeleteAllOfOption = client.DeleteAllOfOption // Helper functions from client. var ObjectKeyFromObject = client.ObjectKeyFromObject -// TODO: Investigate if the ObjectRecognizer should return unversioned -// or versioned ObjectID's -type ObjectRecognizer interface { - ResolveObjectID(ctx context.Context, fileName string, content []byte) (ObjectID, error) -} - // UnversionedObjectID represents an ID for an Object whose version is not known. // However, the Group, Kind, Name and optionally, Namespace is known and should // uniquely identify the Object at a specific moment in time. 
diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index f7dba01d..58119438 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -34,7 +34,7 @@ func NewManifest( dir string, contentTyper filesystem.ContentTyper, namespacer storage.Namespacer, - recognizer core.ObjectRecognizer, + recognizer unstructured.ObjectRecognizer, pathExcluder filesystem.PathExcluder, ) (Storage, error) { fs := filesystem.NewOSFilesystem(dir) diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go index 814b4379..020b47eb 100644 --- a/pkg/storage/filesystem/unstructured/interfaces.go +++ b/pkg/storage/filesystem/unstructured/interfaces.go @@ -22,13 +22,19 @@ type Storage interface { Sync(ctx context.Context) ([]ChecksumPathID, error) // ObjectRecognizer returns the underlying ObjectRecognizer used. - ObjectRecognizer() core.ObjectRecognizer + ObjectRecognizer() ObjectRecognizer // PathExcluder specifies what paths to not sync PathExcluder() filesystem.PathExcluder // MappedFileFinder returns the underlying MappedFileFinder used. MappedFileFinder() MappedFileFinder } +// TODO: Investigate if the ObjectRecognizer should return unversioned +// or versioned ObjectID's +type ObjectRecognizer interface { + ResolveObjectID(ctx context.Context, fileName string, content []byte) (core.ObjectID, error) +} + // MappedFileFinder is an extension to FileFinder that allows it to have an internal // cache with mappings between UnversionedObjectID and a ChecksumPath. 
This allows // higher-order interfaces to manage Objects in files in an unorganized directory diff --git a/pkg/storage/core/recognizer.go b/pkg/storage/filesystem/unstructured/recognizer.go similarity index 84% rename from pkg/storage/core/recognizer.go rename to pkg/storage/filesystem/unstructured/recognizer.go index 92dafd03..e4b2d441 100644 --- a/pkg/storage/core/recognizer.go +++ b/pkg/storage/filesystem/unstructured/recognizer.go @@ -1,4 +1,4 @@ -package core +package unstructured import ( "context" @@ -6,11 +6,12 @@ import ( "fmt" "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage/core" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // KubeObjectRecognizer implements ObjectRecognizer. -var _ ObjectRecognizer = &KubeObjectRecognizer{} +var _ ObjectRecognizer = KubeObjectRecognizer{} // KubeObjectRecognizer is a simple implementation of ObjectRecognizer, that // decodes the given byte content with the assumption that it is YAML (which covers @@ -27,7 +28,7 @@ type KubeObjectRecognizer struct { AllowUnrecognized bool } -func (r *KubeObjectRecognizer) ResolveObjectID(_ context.Context, _ string, content []byte) (ObjectID, error) { +func (r KubeObjectRecognizer) ResolveObjectID(_ context.Context, _ string, content []byte) (core.ObjectID, error) { if r.Decoder == nil { return nil, errors.New("programmer error: KubeObjectRecognizer.Decoder is nil") } @@ -54,5 +55,5 @@ func (r *KubeObjectRecognizer) ResolveObjectID(_ context.Context, _ string, cont return nil, fmt.Errorf("GroupVersionKind %v not recognized by the scheme", gvk) } - return NewObjectID(metaObj.GroupVersionKind(), ObjectKeyFromObject(metaObj)), nil + return core.NewObjectID(metaObj.GroupVersionKind(), core.ObjectKeyFromObject(metaObj)), nil } diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go index 91097345..89a8697e 100644 --- a/pkg/storage/filesystem/unstructured/storage.go +++ 
b/pkg/storage/filesystem/unstructured/storage.go @@ -6,11 +6,10 @@ import ( "fmt" "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) -func NewGeneric(storage filesystem.Storage, recognizer core.ObjectRecognizer, pathExcluder filesystem.PathExcluder) (Storage, error) { +func NewGeneric(storage filesystem.Storage, recognizer ObjectRecognizer, pathExcluder filesystem.PathExcluder) (Storage, error) { if storage == nil { return nil, fmt.Errorf("storage is mandatory") } @@ -31,7 +30,7 @@ func NewGeneric(storage filesystem.Storage, recognizer core.ObjectRecognizer, pa type Generic struct { filesystem.Storage - recognizer core.ObjectRecognizer + recognizer ObjectRecognizer mappedFileFinder MappedFileFinder pathExcluder filesystem.PathExcluder } @@ -105,7 +104,7 @@ func (s *Generic) Sync(ctx context.Context) ([]ChecksumPathID, error) { } // ObjectRecognizer returns the underlying ObjectRecognizer used. -func (s *Generic) ObjectRecognizer() core.ObjectRecognizer { +func (s *Generic) ObjectRecognizer() ObjectRecognizer { return s.recognizer } From 0d5a3b1a7ad342a81722f4ff9e939ec3f01d5712 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 1 Feb 2021 22:18:43 +0200 Subject: [PATCH 099/149] Rename MappedFileFinder to unstructured.FileFinder --- .../filesystem/unstructured/event/storage.go | 16 ++++---- .../unstructured/filefinder_mapped.go | 38 +++++++++---------- .../filesystem/unstructured/interfaces.go | 14 +++---- .../filesystem/unstructured/storage.go | 32 ++++++++-------- 4 files changed, 50 insertions(+), 50 deletions(-) diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index 58119438..049b9ed2 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -28,7 +28,7 @@ type Storage interface { const 
defaultEventsBufferSize = 4096 // NewManifest is a high-level constructor for a generic -// MappedFileFinder and filesystem.Storage, together with a +// unstructured.FileFinder and filesystem.Storage, together with a // inotify FileWatcher; all combined into an unstructuredevent.Storage. func NewManifest( dir string, @@ -38,7 +38,7 @@ func NewManifest( pathExcluder filesystem.PathExcluder, ) (Storage, error) { fs := filesystem.NewOSFilesystem(dir) - fileFinder := unstructured.NewGenericMappedFileFinder(contentTyper, fs) + fileFinder := unstructured.NewGenericFileFinder(contentTyper, fs) fsRaw, err := filesystem.NewGeneric(fileFinder, namespacer) if err != nil { return nil, err @@ -61,7 +61,7 @@ func NewManifest( // NewGeneric is an extended Storage implementation, which // together with the provided ObjectRecognizer and FileEventsEmitter listens for -// file events, keeps the mappings of the filesystem.Storage's MappedFileFinder +// file events, keeps the mappings of the unstructured.Storage's unstructured.FileFinder // in sync (s must use the mapped variant), and sends high-level ObjectEvents // upstream. // @@ -265,7 +265,7 @@ func (s *Generic) handleDelete(ctx context.Context, ev *fileevents.FileEvent) er // the known objects in such a way that it is able to do the reverse-lookup. For // mapped FileFinders, by this point the path should still be in the local cache, // which should make us able to get the ID before deleted from the cache. - objectID, err := s.MappedFileFinder().ObjectAt(ctx, ev.Path) + objectID, err := s.UnstructuredFileFinder().ObjectAt(ctx, ev.Path) if err != nil { return fmt.Errorf("failed to reverse lookup ID for deleted file %q: %v", ev.Path, err) } @@ -307,7 +307,7 @@ func (s *Generic) handleModifyMove(ctx context.Context, ev *fileevents.FileEvent // changes the underlying ObjectID. 
objectEvent := event.ObjectEventUpdate // Set the mapping if it didn't exist before; assume this is a Create event - if _, ok := s.MappedFileFinder().GetMapping(ctx, versionedID); !ok { + if _, ok := s.UnstructuredFileFinder().GetMapping(ctx, versionedID); !ok { // This is what actually determines if an Object is created, // so update the event to update.ObjectEventCreate here objectEvent = event.ObjectEventCreate @@ -333,13 +333,13 @@ func (s *Generic) sendEvent(eventType event.ObjectEventType, id core.Unversioned // will be overridden with the specified new path func (s *Generic) setMapping(ctx context.Context, id core.UnversionedObjectID, path string) { // Get the current checksum of the new file - checksum, err := s.MappedFileFinder().Filesystem().Checksum(ctx, path) + checksum, err := s.UnstructuredFileFinder().Filesystem().Checksum(ctx, path) if err != nil { logrus.Errorf("Unexpected error when getting checksum of file %q: %v", path, err) return } // Register the current state in the cache - s.MappedFileFinder().SetMapping(ctx, id, unstructured.ChecksumPath{ + s.UnstructuredFileFinder().SetMapping(ctx, id, unstructured.ChecksumPath{ Path: path, Checksum: checksum, }) @@ -347,5 +347,5 @@ func (s *Generic) setMapping(ctx context.Context, id core.UnversionedObjectID, p // deleteMapping removes a mapping a file that doesn't exist func (s *Generic) deleteMapping(ctx context.Context, id core.UnversionedObjectID) { - s.MappedFileFinder().DeleteMapping(ctx, id) + s.UnstructuredFileFinder().DeleteMapping(ctx, id) } diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go index 274da22f..4e2ef937 100644 --- a/pkg/storage/filesystem/unstructured/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -15,20 +15,20 @@ var ( ErrNotTracked = errors.New("untracked object") ) -// GenericMappedFileFinder implements MappedFileFinder. 
-var _ MappedFileFinder = &GenericMappedFileFinder{} +// GenericFileFinder implements FileFinder. +var _ FileFinder = &GenericFileFinder{} -// NewGenericMappedFileFinder creates a new instance of GenericMappedFileFinder, -// that implements the MappedFileFinder interface. The contentTyper is optional, +// NewGenericFileFinder creates a new instance of GenericFileFinder, +// that implements the FileFinder interface. The contentTyper is optional, // by default core.DefaultContentTyper will be used. -func NewGenericMappedFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.Filesystem) MappedFileFinder { +func NewGenericFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.Filesystem) FileFinder { if contentTyper == nil { contentTyper = filesystem.DefaultContentTyper } if fs == nil { - panic("NewGenericMappedFileFinder: fs is mandatory") + panic("NewGenericFileFinder: fs is mandatory") } - return &GenericMappedFileFinder{ + return &GenericFileFinder{ contentTyper: contentTyper, // TODO: Support multiple branches branch: &branchImpl{}, @@ -36,7 +36,7 @@ func NewGenericMappedFileFinder(contentTyper filesystem.ContentTyper, fs filesys } } -// GenericMappedFileFinder is a generic implementation of MappedFileFinder. +// GenericFileFinder is a generic implementation of FileFinder. // It uses a ContentTyper to identify what content type a file uses. // // This implementation relies on that all information about what files exist @@ -45,7 +45,7 @@ func NewGenericMappedFileFinder(contentTyper filesystem.ContentTyper, fs filesys // // Hence, this implementation does not at the moment support creating net-new // Objects without someone calling SetMapping() first. 
-type GenericMappedFileFinder struct { +type GenericFileFinder struct { // Default: DefaultContentTyper contentTyper filesystem.ContentTyper fs filesystem.Filesystem @@ -53,16 +53,16 @@ type GenericMappedFileFinder struct { branch branch } -func (f *GenericMappedFileFinder) Filesystem() filesystem.Filesystem { +func (f *GenericFileFinder) Filesystem() filesystem.Filesystem { return f.fs } -func (f *GenericMappedFileFinder) ContentTyper() filesystem.ContentTyper { +func (f *GenericFileFinder) ContentTyper() filesystem.ContentTyper { return f.contentTyper } // ObjectPath gets the file path relative to the root directory -func (f *GenericMappedFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { +func (f *GenericFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { cp, ok := f.GetMapping(ctx, id) if !ok { // TODO: separate interface for "new creates"? @@ -73,7 +73,7 @@ func (f *GenericMappedFileFinder) ObjectPath(ctx context.Context, id core.Unvers // ObjectAt retrieves the ID containing the virtual path based // on the given physical file path. -func (f *GenericMappedFileFinder) ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) { +func (f *GenericFileFinder) ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) { // TODO: Add reverse tracking too? for gk, gkIter := range f.branch.raw() { for ns, nsIter := range gkIter.raw() { @@ -99,7 +99,7 @@ func (f *GenericMappedFileFinder) ObjectAt(ctx context.Context, path string) (co // v1.Namespace objects that exist in the system, or just the set of // different namespaces that have been set on any object belonging to // the given GroupKind. 
-func (f *GenericMappedFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { +func (f *GenericFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { m := f.branch.groupKind(gk).raw() nsSet := sets.NewString() for ns := range m { @@ -113,7 +113,7 @@ func (f *GenericMappedFileFinder) ListNamespaces(ctx context.Context, gk core.Gr // root-spaced GroupKinds, the caller must not. When namespaced, this function // must only return object IDs for that given namespace. If any of the given // rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. -func (f *GenericMappedFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { +func (f *GenericFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { m := f.branch.groupKind(gk).namespace(namespace).raw() ids := make([]core.UnversionedObjectID, 0, len(m)) for name := range m { @@ -123,7 +123,7 @@ func (f *GenericMappedFileFinder) ListObjectIDs(ctx context.Context, gk core.Gro } // GetMapping retrieves a mapping in the system -func (f *GenericMappedFileFinder) GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) { +func (f *GenericFileFinder) GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) { cp, ok := f.branch. groupKind(id.GroupKind()). namespace(id.ObjectKey().Namespace). @@ -132,7 +132,7 @@ func (f *GenericMappedFileFinder) GetMapping(ctx context.Context, id core.Unvers } // SetMapping binds an ID's virtual path to a physical file path -func (f *GenericMappedFileFinder) SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) { +func (f *GenericFileFinder) SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) { f.branch. groupKind(id.GroupKind()). 
namespace(id.ObjectKey().Namespace). @@ -140,7 +140,7 @@ func (f *GenericMappedFileFinder) SetMapping(ctx context.Context, id core.Unvers } // ResetMappings replaces all mappings at once -func (f *GenericMappedFileFinder) ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) { +func (f *GenericFileFinder) ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) { f.branch = &branchImpl{} for id, cp := range m { f.SetMapping(ctx, id, cp) @@ -149,7 +149,7 @@ func (f *GenericMappedFileFinder) ResetMappings(ctx context.Context, m map[core. // DeleteMapping removes the physical file path mapping // matching the given id -func (f *GenericMappedFileFinder) DeleteMapping(ctx context.Context, id core.UnversionedObjectID) { +func (f *GenericFileFinder) DeleteMapping(ctx context.Context, id core.UnversionedObjectID) { f.branch. groupKind(id.GroupKind()). namespace(id.ObjectKey().Namespace). diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go index 020b47eb..c77f516f 100644 --- a/pkg/storage/filesystem/unstructured/interfaces.go +++ b/pkg/storage/filesystem/unstructured/interfaces.go @@ -10,7 +10,7 @@ import ( // Storage is a raw Storage interface that builds on top // of Storage. It uses an ObjectRecognizer to recognize // otherwise unknown objects in unstructured files. -// The Storage must use a MappedFileFinder underneath. +// The Storage must use a unstructured.FileFinder underneath. // // Multiple Objects in the same file, or multiple Objects with the // same ID in multiple files are not supported. @@ -18,15 +18,15 @@ type Storage interface { filesystem.Storage // Sync synchronizes the current state of the filesystem with the - // cached mappings in the MappedFileFinder. + // cached mappings in the unstructured.FileFinder. Sync(ctx context.Context) ([]ChecksumPathID, error) // ObjectRecognizer returns the underlying ObjectRecognizer used. 
ObjectRecognizer() ObjectRecognizer // PathExcluder specifies what paths to not sync PathExcluder() filesystem.PathExcluder - // MappedFileFinder returns the underlying MappedFileFinder used. - MappedFileFinder() MappedFileFinder + // UnstructuredFileFinder returns the underlying unstructured.FileFinder used. + UnstructuredFileFinder() FileFinder } // TODO: Investigate if the ObjectRecognizer should return unversioned @@ -35,14 +35,14 @@ type ObjectRecognizer interface { ResolveObjectID(ctx context.Context, fileName string, content []byte) (core.ObjectID, error) } -// MappedFileFinder is an extension to FileFinder that allows it to have an internal +// FileFinder is an extension to filesystem.FileFinder that allows it to have an internal // cache with mappings between UnversionedObjectID and a ChecksumPath. This allows // higher-order interfaces to manage Objects in files in an unorganized directory // (e.g. a Git repo). // // Multiple Objects in the same file, or multiple Objects with the // same ID in multiple files are not supported. -type MappedFileFinder interface { +type FileFinder interface { filesystem.FileFinder // GetMapping retrieves a mapping in the system. @@ -57,7 +57,7 @@ type MappedFileFinder interface { } // ChecksumPath is a tuple of a given Checksum and relative file Path, -// for use in MappedFileFinder. +// for use in unstructured.FileFinder. type ChecksumPath struct { // Checksum is the checksum of the file at the given path. 
// diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go index 89a8697e..7e39a5e3 100644 --- a/pkg/storage/filesystem/unstructured/storage.go +++ b/pkg/storage/filesystem/unstructured/storage.go @@ -16,29 +16,29 @@ func NewGeneric(storage filesystem.Storage, recognizer ObjectRecognizer, pathExc if recognizer == nil { return nil, fmt.Errorf("recognizer is mandatory") } - mappedFileFinder, ok := storage.FileFinder().(MappedFileFinder) + fileFinder, ok := storage.FileFinder().(FileFinder) if !ok { - return nil, errors.New("the given filesystem.Storage must use a MappedFileFinder") + return nil, errors.New("the given filesystem.Storage must use a unstructured.FileFinder") } return &Generic{ - Storage: storage, - recognizer: recognizer, - mappedFileFinder: mappedFileFinder, - pathExcluder: pathExcluder, + Storage: storage, + recognizer: recognizer, + fileFinder: fileFinder, + pathExcluder: pathExcluder, }, nil } type Generic struct { filesystem.Storage - recognizer ObjectRecognizer - mappedFileFinder MappedFileFinder - pathExcluder filesystem.PathExcluder + recognizer ObjectRecognizer + fileFinder FileFinder + pathExcluder filesystem.PathExcluder } // Sync synchronizes the current state of the filesystem with the -// cached mappings in the MappedFileFinder. +// cached mappings in the underlying unstructured.FileFinder. 
func (s *Generic) Sync(ctx context.Context) ([]ChecksumPathID, error) { - fileFinder := s.MappedFileFinder() + fileFinder := s.UnstructuredFileFinder() // List all valid files in the fs files, err := filesystem.ListValidFilesInFilesystem( @@ -52,7 +52,7 @@ func (s *Generic) Sync(ctx context.Context) ([]ChecksumPathID, error) { } // Send SYNC events for all files (and fill the mappings - // of the MappedFileFinder) before starting to monitor changes + // of the unstructured.FileFinder) before starting to monitor changes updatedFiles := make([]ChecksumPathID, 0, len(files)) for _, filePath := range files { // Get the current checksum of the file @@ -93,7 +93,7 @@ func (s *Generic) Sync(ctx context.Context) ([]ChecksumPathID, error) { Checksum: currentChecksum, Path: filePath, } - s.MappedFileFinder().SetMapping(ctx, id, cp) + fileFinder.SetMapping(ctx, id, cp) // Add to the slice which we'll return updatedFiles = append(updatedFiles, ChecksumPathID{ ChecksumPath: cp, @@ -113,7 +113,7 @@ func (s *Generic) PathExcluder() filesystem.PathExcluder { return s.pathExcluder } -// MappedFileFinder returns the underlying MappedFileFinder used. -func (s *Generic) MappedFileFinder() MappedFileFinder { - return s.mappedFileFinder +// UnstructuredFileFinder returns the underlying unstructured.FileFinder used. +func (s *Generic) UnstructuredFileFinder() FileFinder { + return s.fileFinder } From 2b936f9f3ab217f7f374f865de23a780f69394fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 1 Feb 2021 22:34:27 +0200 Subject: [PATCH 100/149] Move the (Tx|Commit)HookChains to the transactional.Client instead of the BranchManager. 
--- cmd/sample-gitops/main.go | 4 +-- pkg/storage/client/transactional/client.go | 30 ++++++++++++++----- .../transactional/distributed/client.go | 6 ++-- .../transactional/distributed/git/git.go | 21 +++---------- .../transactional/distributed/interfaces.go | 9 +++--- .../client/transactional/interfaces.go | 10 +++---- pkg/storage/client/transactional/tx_common.go | 5 ++-- 7 files changed, 43 insertions(+), 42 deletions(-) diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go index 65d1e7e6..4bc80367 100644 --- a/cmd/sample-gitops/main.go +++ b/cmd/sample-gitops/main.go @@ -198,10 +198,10 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str return err } - // Register the PR CommitHook with the BranchManager + // Register the PR CommitHook with the distributed transaction Client // This needs to be done after the distributed.NewClient call, so // it has been able to handle pushing of the branch first. - localClone.CommitHookChain().Register(prCommitHook) + txClient.CommitHookChain().Register(prCommitHook) // Start the sync loop in the background txClient.StartResyncLoop(ctx, 15*time.Second) diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index e9b034e7..9a615414 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -26,11 +26,13 @@ func NewGeneric(c client.Client, manager BranchManager, merger BranchMerger) (Cl return nil, fmt.Errorf("%w: manager is required", core.ErrInvalidParameter) } return &Generic{ - c: c, - txs: make(map[string]*txLock), - txsMu: &sync.Mutex{}, - manager: manager, - merger: merger, + c: c, + txs: make(map[string]*txLock), + txsMu: &sync.Mutex{}, + txHooks: &MultiTransactionHook{}, + commitHooks: &MultiCommitHook{}, + manager: manager, + merger: merger, }, nil } @@ -42,6 +44,10 @@ type Generic struct { // txsMu guards reads and writes of txs txsMu *sync.Mutex + // Hooks + txHooks 
TransactionHookChain + commitHooks CommitHookChain + // +optional merger BranchMerger // +required @@ -191,7 +197,7 @@ func (c *Generic) cleanupAfterTx(ctx context.Context, info *TxInfo) error { c.manager.ResetToCleanBranch(ctx, info.Base), // TODO: should this be in its own goroutine to switch back to main // ASAP? - c.manager.TransactionHookChain().PostTransactionHook(ctx, *info), + c.TransactionHookChain().PostTransactionHook(ctx, *info), }) } @@ -207,6 +213,14 @@ func (c *Generic) BranchManager() BranchManager { return c.manager } +func (c *Generic) TransactionHookChain() TransactionHookChain { + return c.txHooks +} + +func (c *Generic) CommitHookChain() CommitHookChain { + return c.commitHooks +} + func (c *Generic) Transaction(ctx context.Context, opts ...TxOption) Tx { tx, err := c.transaction(ctx, opts...) if err != nil { @@ -253,7 +267,7 @@ func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) ctxWithDeadline, cleanupFunc := c.initTx(ctx, info) // Run pre-tx checks - err = c.manager.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info) + err = c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info) return &txImpl{ &txCommon{ @@ -307,7 +321,7 @@ func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts // Run pre-tx checks and create the new branch err = utilerrs.NewAggregate([]error{ - c.manager.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info), + c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info), c.manager.CreateBranch(ctxWithDeadline, headBranch), }) diff --git a/pkg/storage/client/transactional/distributed/client.go b/pkg/storage/client/transactional/distributed/client.go index 665c6fdb..a92e54a9 100644 --- a/pkg/storage/client/transactional/distributed/client.go +++ b/pkg/storage/client/transactional/distributed/client.go @@ -32,9 +32,9 @@ func NewClient(c transactional.Client, remote Remote, opts ...ClientOption) (*Ge branchLocksMu: &sync.Mutex{}, } 
- // Register ourselves to hook into the branch manager's operations - c.BranchManager().CommitHookChain().Register(g) - c.BranchManager().TransactionHookChain().Register(g) + // Register ourselves to hook into the transactional.Client's operations + c.CommitHookChain().Register(g) + c.TransactionHookChain().Register(g) return g, nil } diff --git a/pkg/storage/client/transactional/distributed/git/git.go b/pkg/storage/client/transactional/distributed/git/git.go index 6fb14d83..4529d5a4 100644 --- a/pkg/storage/client/transactional/distributed/git/git.go +++ b/pkg/storage/client/transactional/distributed/git/git.go @@ -65,12 +65,10 @@ func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts log.Debugf("Created temporary directory for the git clone at %q", tmpDir) d := &LocalClone{ - repoRef: repoRef, - opts: opts, - cloneDir: tmpDir, - lock: &sync.Mutex{}, - commitHooks: &transactional.MultiCommitHook{}, - txHooks: &transactional.MultiTransactionHook{}, + repoRef: repoRef, + opts: opts, + cloneDir: tmpDir, + lock: &sync.Mutex{}, } log.Trace("URL endpoint parsed and authentication method chosen") @@ -104,17 +102,6 @@ type LocalClone struct { // the lock for git operations (so no ops are done simultaneously) lock *sync.Mutex - - commitHooks transactional.CommitHookChain - txHooks transactional.TransactionHookChain -} - -func (d *LocalClone) CommitHookChain() transactional.CommitHookChain { - return d.commitHooks -} - -func (d *LocalClone) TransactionHookChain() transactional.TransactionHookChain { - return d.txHooks } func (d *LocalClone) Dir() string { diff --git a/pkg/storage/client/transactional/distributed/interfaces.go b/pkg/storage/client/transactional/distributed/interfaces.go index 81105990..15272b2e 100644 --- a/pkg/storage/client/transactional/distributed/interfaces.go +++ b/pkg/storage/client/transactional/distributed/interfaces.go @@ -8,14 +8,13 @@ import ( ) // Client is a client that can sync state with a remote in a transactional 
way. +// +// A distributed.Client is itself most likely both a CommitHook and TransactionHook; if so, +// it should be automatically registered with the transactional.Client's *HookChain in the +// distributed.Client's constructor. type Client interface { // The distributed Client extends the transactional Client transactional.Client - // This Client is itself both a CommitHook and TransactionHook; these should - // be automatically registered with the transactional.Client's BranchManager - // in this Client's constructor. - transactional.CommitHook - transactional.TransactionHook // StartResyncLoop starts a resync loop for the given branches for // the given interval. diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go index cea5b00c..dac57cc6 100644 --- a/pkg/storage/client/transactional/interfaces.go +++ b/pkg/storage/client/transactional/interfaces.go @@ -14,6 +14,11 @@ type Client interface { // BranchMerger is optional. BranchMerger() BranchMerger + // CommitHookChain is a chain of hooks that are run before and after a commit is made. + CommitHookChain() CommitHookChain + // TransactionHookChain is a chain of hooks that are run before and after a transaction. + TransactionHookChain() TransactionHookChain + // Transaction creates a new transaction on the branch stored in the context, so that // no other writes to that branch can take place meanwhile. Transaction(ctx context.Context, opts ...TxOption) Tx @@ -32,11 +37,6 @@ type BranchManager interface { ResetToCleanBranch(ctx context.Context, branch string) error // Commit creates a new commit for the branch stored in the context. 
Commit(ctx context.Context, commit Commit) error - - // CommitHookChain must be non-nil, but can be a no-op - CommitHookChain() CommitHookChain - // TransactionHookChain must be non-nil, but can be a no-op - TransactionHookChain() TransactionHookChain } type BranchMerger interface { diff --git a/pkg/storage/client/transactional/tx_common.go b/pkg/storage/client/transactional/tx_common.go index 3448c811..7c33849b 100644 --- a/pkg/storage/client/transactional/tx_common.go +++ b/pkg/storage/client/transactional/tx_common.go @@ -13,6 +13,7 @@ type txCommon struct { err error c client.Client manager BranchManager + commitHook CommitHook ctx context.Context ops []txFunc info TxInfo @@ -33,7 +34,7 @@ func (tx *txCommon) Abort(err error) error { func (tx *txCommon) handlePreCommit(c Commit) txFunc { return func() error { - return tx.manager.CommitHookChain().PreCommitHook(tx.ctx, c, tx.info) + return tx.commitHook.PreCommitHook(tx.ctx, c, tx.info) } } @@ -45,7 +46,7 @@ func (tx *txCommon) commit(c Commit) txFunc { func (tx *txCommon) handlePostCommit(c Commit) txFunc { return func() error { - return tx.manager.CommitHookChain().PostCommitHook(tx.ctx, c, tx.info) + return tx.commitHook.PostCommitHook(tx.ctx, c, tx.info) } } From c0d0ff2345af88ab98178c1e5e6a36d02b85aa9b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 1 Feb 2021 22:41:23 +0200 Subject: [PATCH 101/149] Fix TODO in the simple filefinder. 
---
 pkg/storage/filesystem/filefinder_simple.go | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go
index 5317bc28..0d6489e7 100644
--- a/pkg/storage/filesystem/filefinder_simple.go
+++ b/pkg/storage/filesystem/filefinder_simple.go
@@ -2,7 +2,6 @@ package filesystem
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -79,6 +78,8 @@ var _ FileFinder = &SimpleFileFinder{}
 // }
 //
 // is resolved by the FileExtensionResolver, for the given ContentType.
+// If <group> is an empty string (as when "apiVersion: v1" is used); <group> will
+// be set to "core".
 //
 // This FileFinder does not support the ObjectAt method.
 type SimpleFileFinder struct {
@@ -99,8 +100,6 @@ type SimpleFileFinderOptions struct {
 	FileExtensionResolver FileExtensionResolver
 }
 
-// TODO: Use group name "core" if group is "" to support core k8s objects.
-
 func (f *SimpleFileFinder) Filesystem() Filesystem {
 	return f.fs
 }
@@ -138,14 +137,19 @@ func (f *SimpleFileFinder) kindKeyPath(gk core.GroupKind) string {
 		// ./<kind>/
 		return filepath.Join(gk.Kind)
 	}
+	// Fall back to the "core/v1" storage path for "apiVersion: v1"
+	group := gk.Group
+	if len(group) == 0 {
+		group = "core"
+	}
 	// ./<group>/<kind>/
-	return filepath.Join(gk.Group, gk.Kind)
+	return filepath.Join(group, gk.Kind)
 }
 
 // ObjectAt retrieves the ID containing the virtual path based
 // on the given physical file path.
 func (f *SimpleFileFinder) ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) {
-	return nil, errors.New("not implemented")
+	return nil, core.ErrNotImplemented
 }
 
 func (f *SimpleFileFinder) ext() (string, error) {

From 097e9d1d73156e132a855ff7d9a1c3d91407eefe Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?=
Date: Mon, 1 Feb 2021 22:44:17 +0200
Subject: [PATCH 102/149] Minor comment

---
 pkg/storage/client/transactional/tx_ops.go | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/pkg/storage/client/transactional/tx_ops.go b/pkg/storage/client/transactional/tx_ops.go
index e0a6c375..de066d3c 100644
--- a/pkg/storage/client/transactional/tx_ops.go
+++ b/pkg/storage/client/transactional/tx_ops.go
@@ -6,6 +6,9 @@ import (
 	"github.com/weaveworks/libgitops/pkg/storage/core"
 )
 
+// Implement the required "fluent/functional" methods on Tx.
+// Go doesn't have generics; hence we need to do this twice.
+
 func (tx *txImpl) Get(key core.ObjectKey, obj core.Object) Tx {
 	return tx.Custom(func(ctx context.Context) error {
 		return tx.c.Get(ctx, key, obj)
@@ -54,7 +57,8 @@ func (tx *txImpl) PatchStatus(obj core.Object, patch core.Patch, opts ...core.Pa
 	})
 }
 
-// TODO
+// Implement the required "fluent/functional" methods on BranchTx.
+// Go doesn't have generics; hence we need to do this twice.
 func (tx *txBranchImpl) Get(key core.ObjectKey, obj core.Object) BranchTx {
 	return tx.Custom(func(ctx context.Context) error {

From 6ec2841d824b802be1d4bc845234d8ba724332aa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?=
Date: Mon, 1 Feb 2021 22:44:24 +0200
Subject: [PATCH 103/149] Minor comment

---
 pkg/storage/filesystem/fileevents/inotify/filewatcher.go | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/pkg/storage/filesystem/fileevents/inotify/filewatcher.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go
index 58d85186..162820bd 100644
--- a/pkg/storage/filesystem/fileevents/inotify/filewatcher.go
+++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go
@@ -213,8 +213,9 @@ func (w *FileWatcher) Close() error {
 	return nil
 }
 
-// Suspend enables a one-time suspend of the given path
-// TODO: clarify how the path should be formatted
+// Suspend enables a one-time suspend for any event from the given path.
+// The path must be relative to the root directory, i.e. computed as
+// path = filepath.Rel(<root directory>, <full path>).
func (w *FileWatcher) Suspend(_ context.Context, path string) { w.suspendFilesMu.Lock() defer w.suspendFilesMu.Unlock() From 4d3aeb16b2e899d0304257866d3196a937ce3317 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 1 Feb 2021 23:52:26 +0200 Subject: [PATCH 104/149] Add sets of ObjectIDs for ease of use elsewhere in this system --- pkg/storage/core/set_objectid.go | 82 ++++++++++++++++++++ pkg/storage/core/set_unversioned_objectid.go | 82 ++++++++++++++++++++ 2 files changed, 164 insertions(+) create mode 100644 pkg/storage/core/set_objectid.go create mode 100644 pkg/storage/core/set_unversioned_objectid.go diff --git a/pkg/storage/core/set_objectid.go b/pkg/storage/core/set_objectid.go new file mode 100644 index 00000000..53df255f --- /dev/null +++ b/pkg/storage/core/set_objectid.go @@ -0,0 +1,82 @@ +package core + +import ( + "k8s.io/apimachinery/pkg/util/sets" +) + +// This is a copy of set_unversioned_objectid.go; needed as Go doesn't have generics. + +// ObjectIDSet is a set of ObjectIDs +type ObjectIDSet interface { + // Has returns true if the object ID is in the set + Has(id ObjectID) bool + // HasAny returns true if any of the object IDs are in the set + HasAny(ids ...ObjectID) bool + // InsertUnique returns false if any of the object IDs are in the set already, + // or true if none of the given object IDs exist in the set yet. If the return value + // is true, the IDs have been added to the set. + InsertUnique(ids ...ObjectID) bool + // Insert inserts the given object IDs into the set + Insert(ids ...ObjectID) ObjectIDSet + // Delete deletes the given object IDs from the set + Delete(ids ...ObjectID) ObjectIDSet + // List lists the given object IDs of the set + List() []ObjectID + // Len returns the length of the set + Len() int +} + +// NewObjectIDSet creates a new ObjectIDSet +func NewObjectIDSet(ids ...ObjectID) ObjectIDSet { + return (objectIDSet{}).Insert(ids...) 
+} + +type objectIDSet map[ObjectID]sets.Empty + +func (s objectIDSet) Has(id ObjectID) bool { + _, found := s[id] + return found +} + +func (s objectIDSet) HasAny(ids ...ObjectID) bool { + for _, id := range ids { + if s.Has(id) { + return true + } + } + return false +} + +func (s objectIDSet) InsertUnique(ids ...ObjectID) bool { + if s.HasAny(ids...) { + return false + } + s.Insert(ids...) + return true +} + +func (s objectIDSet) Insert(ids ...ObjectID) ObjectIDSet { + for _, id := range ids { + s[id] = sets.Empty{} + } + return s +} + +func (s objectIDSet) Delete(ids ...ObjectID) ObjectIDSet { + for _, id := range ids { + delete(s, id) + } + return s +} + +func (s objectIDSet) List() []ObjectID { + list := make([]ObjectID, 0, len(s)) + for id := range s { + list = append(list, id) + } + return list +} + +func (s objectIDSet) Len() int { + return len(s) +} diff --git a/pkg/storage/core/set_unversioned_objectid.go b/pkg/storage/core/set_unversioned_objectid.go new file mode 100644 index 00000000..98f012a1 --- /dev/null +++ b/pkg/storage/core/set_unversioned_objectid.go @@ -0,0 +1,82 @@ +package core + +import ( + "k8s.io/apimachinery/pkg/util/sets" +) + +// This is a copy of set_unversioned_objectid.go; needed as Go doesn't have generics. + +// UnversionedObjectIDSet is a set of UnversionedObjectIDs +type UnversionedObjectIDSet interface { + // Has returns true if the object ID is in the set + Has(id UnversionedObjectID) bool + // HasAny returns true if any of the object IDs are in the set + HasAny(ids ...UnversionedObjectID) bool + // InsertUnique returns false if any of the object IDs are in the set already, + // or true if none of the given object IDs exist in the set yet. If the return value + // is true, the IDs have been added to the set. 
+ InsertUnique(ids ...UnversionedObjectID) bool + // Insert inserts the given object IDs into the set + Insert(ids ...UnversionedObjectID) UnversionedObjectIDSet + // Delete deletes the given object IDs from the set + Delete(ids ...UnversionedObjectID) UnversionedObjectIDSet + // List lists the given object IDs of the set + List() []UnversionedObjectID + // Len returns the length of the set + Len() int +} + +// NewUnversionedObjectIDSet creates a new UnversionedObjectIDSet +func NewUnversionedObjectIDSet(ids ...UnversionedObjectID) UnversionedObjectIDSet { + return (unversionedObjectIDSet{}).Insert(ids...) +} + +type unversionedObjectIDSet map[UnversionedObjectID]sets.Empty + +func (s unversionedObjectIDSet) Has(id UnversionedObjectID) bool { + _, found := s[id] + return found +} + +func (s unversionedObjectIDSet) HasAny(ids ...UnversionedObjectID) bool { + for _, id := range ids { + if s.Has(id) { + return true + } + } + return false +} + +func (s unversionedObjectIDSet) InsertUnique(ids ...UnversionedObjectID) bool { + if s.HasAny(ids...) { + return false + } + s.Insert(ids...) 
+ return true +} + +func (s unversionedObjectIDSet) Insert(ids ...UnversionedObjectID) UnversionedObjectIDSet { + for _, id := range ids { + s[id] = sets.Empty{} + } + return s +} + +func (s unversionedObjectIDSet) Delete(ids ...UnversionedObjectID) UnversionedObjectIDSet { + for _, id := range ids { + delete(s, id) + } + return s +} + +func (s unversionedObjectIDSet) List() []UnversionedObjectID { + list := make([]UnversionedObjectID, 0, len(s)) + for id := range s { + list = append(list, id) + } + return list +} + +func (s unversionedObjectIDSet) Len() int { + return len(s) +} From 8f4b91a2676255de8797cd6156afac56db1ec8a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 1 Feb 2021 23:52:54 +0200 Subject: [PATCH 105/149] Implement fmt.Stringer for ObjectIDs --- pkg/storage/core/interfaces.go | 1 + pkg/storage/core/objectid.go | 18 +++++++++++++++++- 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go index cbe1b21d..546b7caf 100644 --- a/pkg/storage/core/interfaces.go +++ b/pkg/storage/core/interfaces.go @@ -41,6 +41,7 @@ type UnversionedObjectID interface { ObjectKey() ObjectKey WithVersion(version string) ObjectID + String() string // Implements fmt.Stringer } // ObjectID is a superset of UnversionedObjectID, that also specifies an exact version. diff --git a/pkg/storage/core/objectid.go b/pkg/storage/core/objectid.go index 8dc747be..f23c4db6 100644 --- a/pkg/storage/core/objectid.go +++ b/pkg/storage/core/objectid.go @@ -1,6 +1,10 @@ package core -import "k8s.io/apimachinery/pkg/runtime/schema" +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) // NewUnversionedObjectID creates a new UnversionedObjectID from the given GroupKind and ObjectKey. 
func NewUnversionedObjectID(gk GroupKind, key ObjectKey) UnversionedObjectID { @@ -15,6 +19,12 @@ type unversionedObjectID struct { func (o unversionedObjectID) GroupKind() GroupKind { return o.gk } func (o unversionedObjectID) ObjectKey() ObjectKey { return o.key } func (o unversionedObjectID) WithVersion(version string) ObjectID { return objectID{o, version} } +func (o unversionedObjectID) String() string { + if o.key.Namespace == "" { + return fmt.Sprintf("UnversionedObjectID: groupkind=%s name=%s", o.gk, o.key.Name) + } + return fmt.Sprintf("UnversionedObjectID: groupkind=%s name=%s ns=%s", o.gk, o.key.Name, o.key.Namespace) +} // NewObjectID creates a new ObjectID from the given GroupVersionKind and ObjectKey. func NewObjectID(gvk GroupVersionKind, key ObjectKey) ObjectID { @@ -27,3 +37,9 @@ type objectID struct { } func (o objectID) GroupVersionKind() schema.GroupVersionKind { return o.gk.WithVersion(o.version) } +func (o objectID) String() string { + if o.key.Namespace == "" { + return fmt.Sprintf("ObjectID: groupkind=%s version=%s name=%s", o.gk, o.version, o.key.Name) + } + return fmt.Sprintf("ObjectID: groupkind=%s version=%s name=%s ns=%s", o.gk, o.version, o.key.Name, o.key.Namespace) +} From 3928b90cee2b755f7f26c1e7262f6f21ae9917b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 1 Feb 2021 23:53:59 +0200 Subject: [PATCH 106/149] Make the ObjectRecognizer interface better; make it use FrameReaders. 
--- .../filesystem/unstructured/event/storage.go | 18 ++--- .../filesystem/unstructured/interfaces.go | 8 +- .../filesystem/unstructured/recognizer.go | 74 ++++++++++++------- .../filesystem/unstructured/storage.go | 58 ++++++++++++--- 4 files changed, 108 insertions(+), 50 deletions(-) diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index 049b9ed2..bd96fedc 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -278,16 +278,16 @@ func (s *Generic) handleDelete(ctx context.Context, ev *fileevents.FileEvent) er } func (s *Generic) handleModifyMove(ctx context.Context, ev *fileevents.FileEvent) error { - // Read the content of this modified, moved or created file - content, err := s.FileFinder().Filesystem().ReadFile(ctx, ev.Path) + // Read and recognize the file + versionedID, err := unstructured.ReadAndRecognizeFile( + ctx, + s.UnstructuredFileFinder().Filesystem(), + s.UnstructuredFileFinder().ContentTyper(), + s.ObjectRecognizer(), + ev.Path, + ) if err != nil { - return fmt.Errorf("could not read %q: %v", ev.Path, err) - } - - // Try to recognize the object - versionedID, err := s.ObjectRecognizer().ResolveObjectID(ctx, ev.Path, content) - if err != nil { - return fmt.Errorf("did not recognize object at path %q: %v", ev.Path, err) + return err } // If the file was just moved around, just overwrite the earlier mapping diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go index c77f516f..66396d13 100644 --- a/pkg/storage/filesystem/unstructured/interfaces.go +++ b/pkg/storage/filesystem/unstructured/interfaces.go @@ -3,6 +3,7 @@ package unstructured import ( "context" + "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) @@ -29,10 +30,11 @@ type Storage 
interface { UnstructuredFileFinder() FileFinder } -// TODO: Investigate if the ObjectRecognizer should return unversioned -// or versioned ObjectID's +// ObjectRecognizer recognizes objects stored in files. type ObjectRecognizer interface { - ResolveObjectID(ctx context.Context, fileName string, content []byte) (core.ObjectID, error) + // RecognizeObjectIDs returns the ObjectIDs present in the file with the given name, + // content type and content (in the FrameReader). + RecognizeObjectIDs(fileName string, fr serializer.FrameReader) (core.ObjectIDSet, error) } // FileFinder is an extension to filesystem.FileFinder that allows it to have an internal diff --git a/pkg/storage/filesystem/unstructured/recognizer.go b/pkg/storage/filesystem/unstructured/recognizer.go index e4b2d441..68e9ce4d 100644 --- a/pkg/storage/filesystem/unstructured/recognizer.go +++ b/pkg/storage/filesystem/unstructured/recognizer.go @@ -1,9 +1,9 @@ package unstructured import ( - "context" "errors" "fmt" + "io" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" @@ -14,46 +14,64 @@ import ( var _ ObjectRecognizer = KubeObjectRecognizer{} // KubeObjectRecognizer is a simple implementation of ObjectRecognizer, that -// decodes the given byte content with the assumption that it is YAML (which covers -// both YAML and JSON formats) into a *metav1.PartialObjectMetadata, which allows -// extracting the ObjectID from any Kubernetes API Machinery-compatible Object. +// decodes the given (possibly multi-frame file) into a *metav1.PartialObjectMetadata, +// which allows extracting the ObjectID from any Kube API Machinery-compatible Object. // // This operation works even though *metav1.PartialObjectMetadata is not registered // with the underlying Scheme in any way. +// +// This implementation enforces that .apiVersion, .kind and .metadata.name fields are +// non-empty. 
type KubeObjectRecognizer struct { - // Decoder is a required field in order for ResolveObjectID to function. + // Decoder is a required field in order for RecognizeObjectIDs to function. Decoder serializer.Decoder // AllowUnrecognized controls whether this implementation allows recognizing // GVK combinations not known to the underlying Scheme. Default: false AllowUnrecognized bool + // AllowDuplicates controls whether this implementation allows two exactly similar + // ObjectIDs in the same file. Default: false + AllowDuplicates bool } -func (r KubeObjectRecognizer) ResolveObjectID(_ context.Context, _ string, content []byte) (core.ObjectID, error) { +func (r KubeObjectRecognizer) RecognizeObjectIDs(_ string, fr serializer.FrameReader) (core.ObjectIDSet, error) { if r.Decoder == nil { return nil, errors.New("programmer error: KubeObjectRecognizer.Decoder is nil") } - metaObj := &metav1.PartialObjectMetadata{} - err := r.Decoder.DecodeInto( - serializer.NewSingleFrameReader(content, serializer.ContentTypeYAML), - metaObj, - ) - if err != nil { - return nil, err - } - // Validate the object info - gvk := metaObj.GroupVersionKind() - if gvk.Group == "" && gvk.Version == "" { - return nil, fmt.Errorf(".apiVersion field must not be empty") - } - if gvk.Kind == "" { - return nil, fmt.Errorf(".kind field must not be empty") - } - if metaObj.Kind == "" { - return nil, fmt.Errorf(".metadata.name field must not be empty") - } - if !r.AllowUnrecognized && !r.Decoder.SchemeLock().Scheme().Recognizes(gvk) { - return nil, fmt.Errorf("GroupVersionKind %v not recognized by the scheme", gvk) + ids := core.NewObjectIDSet() + + for { + metaObj := &metav1.PartialObjectMetadata{} + err := r.Decoder.DecodeInto(fr, metaObj) + if err == io.EOF { + // If we encountered io.EOF, we know that all is fine and we can exit the for loop and return + break + } else if err != nil { + return nil, err + } + + // Validate the object info + gvk := metaObj.GroupVersionKind() + if gvk.Group == "" && 
gvk.Version == "" { + return nil, fmt.Errorf(".apiVersion field must not be empty") + } + if gvk.Kind == "" { + return nil, fmt.Errorf(".kind field must not be empty") + } + if metaObj.Name == "" { + return nil, fmt.Errorf(".metadata.name field must not be empty") + } + if !r.AllowUnrecognized && !r.Decoder.SchemeLock().Scheme().Recognizes(gvk) { + return nil, fmt.Errorf("GroupVersionKind %v not recognized by the scheme", gvk) + } + + // Create the ObjectID + id := core.NewObjectID(metaObj.GroupVersionKind(), core.ObjectKeyFromObject(metaObj)) + // Insert it into the set; but error if AllowDuplicates==false and it already existed. + // Important: As InsertUnique mutates ids, it must be the first if case + if !ids.InsertUnique(id) && !r.AllowDuplicates { + return nil, fmt.Errorf("invalid file: two Objects with the same ID: %s", id) + } } - return core.NewObjectID(metaObj.GroupVersionKind(), core.ObjectKeyFromObject(metaObj)), nil + return ids, nil } diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go index 7e39a5e3..5f10a4c8 100644 --- a/pkg/storage/filesystem/unstructured/storage.go +++ b/pkg/storage/filesystem/unstructured/storage.go @@ -6,6 +6,8 @@ import ( "fmt" "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) @@ -74,17 +76,16 @@ func (s *Generic) Sync(ctx context.Context) ([]ChecksumPathID, error) { } } - // If the file is not known to the FileFinder yet, or if the checksum - // was empty, read the file, and recognize it. 
- content, err := s.FileFinder().Filesystem().ReadFile(ctx, filePath) + // Read and recognize the file + id, err := ReadAndRecognizeFile( + ctx, + fileFinder.Filesystem(), + fileFinder.ContentTyper(), + s.recognizer, + filePath, + ) if err != nil { - logrus.Warnf("Ignoring %q: %v", filePath, err) - continue - } - - id, err := s.recognizer.ResolveObjectID(ctx, filePath, content) - if err != nil { - logrus.Warnf("Could not recognize object ID in %q: %v", filePath, err) + logrus.Warn(err) continue } @@ -117,3 +118,40 @@ func (s *Generic) PathExcluder() filesystem.PathExcluder { func (s *Generic) UnstructuredFileFinder() FileFinder { return s.fileFinder } + +// ReadAndRecognizeFile reads the given file and its content type; and then recognizes it. +// It only supports one ObjectID per file at the moment. +func ReadAndRecognizeFile( + ctx context.Context, + fs filesystem.Filesystem, + contentTyper filesystem.ContentTyper, + recognizer ObjectRecognizer, + filePath string, +) (core.ObjectID, error) { + // If the file is not known to the FileFinder yet, or if the checksum + // was empty, read the file, and recognize it. + content, err := fs.ReadFile(ctx, filePath) + if err != nil { + return nil, fmt.Errorf("Could not read file %q: %v", filePath, err) + } + // Get the content type for this file so that we can read it properly + ct, err := contentTyper.ContentTypeForPath(ctx, fs, filePath) + if err != nil { + return nil, fmt.Errorf("Could not get content type for file %q: %v", filePath, err) + } + // TODO: In the future this NewFrameReader should come from an interface, not + // directly from the hard-coded serializer package. + fr := serializer.NewFrameReader(ct, serializer.FromBytes(content)) + // Recognize all IDs in the file + ids, err := recognizer.RecognizeObjectIDs(filePath, fr) + if err != nil { + return nil, fmt.Errorf("Could not recognize object IDs in %q: %v", filePath, err) + } + // For now; we only support single-frame files + // TODO: Change this. 
+ if ids.Len() != 1 { + return nil, fmt.Errorf("File %q contained multiple objects; for now only single-frame files are supported", filePath) + } + // Return that one ID + return ids.List()[0], nil +} From ee9fea66b95dde0d62e8041044f97617a203a01d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 1 Feb 2021 23:58:00 +0200 Subject: [PATCH 107/149] Small optimization; set a given size on the set from the initial ids; and then grow over time --- pkg/storage/core/set_objectid.go | 2 +- pkg/storage/core/set_unversioned_objectid.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/storage/core/set_objectid.go b/pkg/storage/core/set_objectid.go index 53df255f..c777b1d7 100644 --- a/pkg/storage/core/set_objectid.go +++ b/pkg/storage/core/set_objectid.go @@ -28,7 +28,7 @@ type ObjectIDSet interface { // NewObjectIDSet creates a new ObjectIDSet func NewObjectIDSet(ids ...ObjectID) ObjectIDSet { - return (objectIDSet{}).Insert(ids...) + return (make(objectIDSet, len(ids))).Insert(ids...) } type objectIDSet map[ObjectID]sets.Empty diff --git a/pkg/storage/core/set_unversioned_objectid.go b/pkg/storage/core/set_unversioned_objectid.go index 98f012a1..99a26418 100644 --- a/pkg/storage/core/set_unversioned_objectid.go +++ b/pkg/storage/core/set_unversioned_objectid.go @@ -28,7 +28,7 @@ type UnversionedObjectIDSet interface { // NewUnversionedObjectIDSet creates a new UnversionedObjectIDSet func NewUnversionedObjectIDSet(ids ...UnversionedObjectID) UnversionedObjectIDSet { - return (unversionedObjectIDSet{}).Insert(ids...) + return (make(unversionedObjectIDSet, len(ids))).Insert(ids...) } type unversionedObjectIDSet map[UnversionedObjectID]sets.Empty From f05750498816df0923dc573f22ad8ff51b580459 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 2 Feb 2021 00:03:30 +0200 Subject: [PATCH 108/149] Use the sets across the codebase. 
--- pkg/storage/backend/backend.go | 2 +- pkg/storage/backend/enforcer.go | 2 +- pkg/storage/client/client.go | 10 +++++----- pkg/storage/filesystem/filefinder_simple.go | 4 ++-- pkg/storage/filesystem/storage.go | 2 +- .../filesystem/unstructured/filefinder_mapped.go | 4 ++-- pkg/storage/interfaces.go | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 18a934b6..97c587bb 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -195,7 +195,7 @@ func (b *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.S // For namespaced GroupKinds, the caller must provide a namespace, and for // root-spaced GroupKinds, the caller must not. When namespaced, this function // must only return object keys for that given namespace. -func (b *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { +func (b *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) { return b.storage.ListObjectIDs(ctx, gk, namespace) } diff --git a/pkg/storage/backend/enforcer.go b/pkg/storage/backend/enforcer.go index c4c7646c..95869bc0 100644 --- a/pkg/storage/backend/enforcer.go +++ b/pkg/storage/backend/enforcer.go @@ -105,7 +105,7 @@ func (e GenericNamespaceEnforcer) EnforceNamespace(ctx context.Context, obj core return err } // Loop through the IDs, and try to match it against the set ns - for _, id := range objIDs { + for _, id := range objIDs.List() { if id.ObjectKey().Name == ns { // Found the namespace; this is a valid setting return nil diff --git a/pkg/storage/client/client.go b/pkg/storage/client/client.go index 822bd63d..61ff4584 100644 --- a/pkg/storage/client/client.go +++ b/pkg/storage/client/client.go @@ -120,18 +120,18 @@ func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...client return errors.New("invalid 
namespace option: cannot filter namespace for root-spaced object") } - allIDs := []core.UnversionedObjectID{} + allIDs := core.NewUnversionedObjectIDSet() for ns := range namespaces { ids, err := c.Backend().ListObjectIDs(ctx, gk, ns) if err != nil { return err } - allIDs = append(allIDs, ids...) + allIDs.Insert(ids.List()...) } // Populate objs through the given (non-buffered) channel ch := make(chan core.Object) - objs := make([]kruntime.Object, 0, len(allIDs)) + objs := make([]kruntime.Object, 0, allIDs.Len()) // How should the object be created? createFunc := createObject(gvk, c.Scheme()) @@ -278,9 +278,9 @@ func createUnstructuredObject(gvk core.GroupVersionKind) newObjectFunc { } } -func (c *Generic) processKeys(ctx context.Context, ids []core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) error { +func (c *Generic) processKeys(ctx context.Context, ids core.UnversionedObjectIDSet, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) error { goroutines := []func() error{} - for _, id := range ids { + for _, id := range ids.List() { goroutines = append(goroutines, c.processKey(ctx, id, filterOpts, fn, output)) } diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go index 0d6489e7..ebb73d3d 100644 --- a/pkg/storage/filesystem/filefinder_simple.go +++ b/pkg/storage/filesystem/filefinder_simple.go @@ -179,7 +179,7 @@ func (f *SimpleFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind // root-spaced GroupKinds, the caller must not. When namespaced, this function // must only return object IDs for that given namespace. If any of the given // rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. 
-func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { +func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) { // If namespace is empty, the names will be in ./, otherwise .// namesDir := filepath.Join(f.kindKeyPath(gk), namespace) entries, err := readDir(ctx, f.fs, namesDir) @@ -214,7 +214,7 @@ func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, // If we got this far, add the key to the list ids = append(ids, core.NewUnversionedObjectID(gk, core.ObjectKey{Name: entry, Namespace: namespace})) } - return ids, nil + return core.NewUnversionedObjectIDSet(ids...), nil } func readDir(ctx context.Context, fs Filesystem, dir string) ([]string, error) { diff --git a/pkg/storage/filesystem/storage.go b/pkg/storage/filesystem/storage.go index 0d1d06e5..ef084f33 100644 --- a/pkg/storage/filesystem/storage.go +++ b/pkg/storage/filesystem/storage.go @@ -151,7 +151,7 @@ func (r *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.S // For namespaced GroupKinds, the caller must provide a namespace, and for // root-spaced GroupKinds, the caller must not. When namespaced, this function // must only return object IDs for that given namespace. 
-func (r *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { +func (r *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) { // Validate the namespace parameter if err := storage.VerifyNamespaced(r.Namespacer(), gk, namespace); err != nil { return nil, err diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go index 4e2ef937..e8b929f5 100644 --- a/pkg/storage/filesystem/unstructured/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -113,13 +113,13 @@ func (f *GenericFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKin // root-spaced GroupKinds, the caller must not. When namespaced, this function // must only return object IDs for that given namespace. If any of the given // rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. -func (f *GenericFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { +func (f *GenericFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) { m := f.branch.groupKind(gk).namespace(namespace).raw() ids := make([]core.UnversionedObjectID, 0, len(m)) for name := range m { ids = append(ids, core.NewUnversionedObjectID(gk, core.ObjectKey{Name: name, Namespace: namespace})) } - return ids, nil + return core.NewUnversionedObjectIDSet(ids...), nil } // GetMapping retrieves a mapping in the system diff --git a/pkg/storage/interfaces.go b/pkg/storage/interfaces.go index ec0d041b..a016eff7 100644 --- a/pkg/storage/interfaces.go +++ b/pkg/storage/interfaces.go @@ -96,7 +96,7 @@ type Lister interface { // root-spaced GroupKinds, the caller must not. When namespaced, this function // must only return object IDs for that given namespace. 
If any of the given // rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. - ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) + ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) } // Reader provides the write operations for the Storage. From 8b276044268219bfd57b91f9e59b3362326ba742 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 2 Feb 2021 00:25:34 +0200 Subject: [PATCH 109/149] Make FileFinder.ObjectsAt support multiple files, potentially. --- pkg/storage/filesystem/filefinder_simple.go | 5 +- pkg/storage/filesystem/interfaces.go | 4 +- .../filesystem/unstructured/event/storage.go | 4 +- .../unstructured/filefinder_mapped.go | 77 ++++++++++++++----- .../filesystem/unstructured/storage.go | 21 ++++- 5 files changed, 84 insertions(+), 27 deletions(-) diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go index ebb73d3d..ae17243b 100644 --- a/pkg/storage/filesystem/filefinder_simple.go +++ b/pkg/storage/filesystem/filefinder_simple.go @@ -146,9 +146,8 @@ func (f *SimpleFileFinder) kindKeyPath(gk core.GroupKind) string { return filepath.Join(group, gk.Kind) } -// ObjectAt retrieves the ID containing the virtual path based -// on the given physical file path. -func (f *SimpleFileFinder) ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) { +// ObjectsAt retrieves the ObjectIDs in the file with the given relative file path. 
+func (f *SimpleFileFinder) ObjectsAt(ctx context.Context, path string) (core.UnversionedObjectIDSet, error) { return nil, core.ErrNotImplemented } diff --git a/pkg/storage/filesystem/interfaces.go b/pkg/storage/filesystem/interfaces.go index 2626680b..d7638099 100644 --- a/pkg/storage/filesystem/interfaces.go +++ b/pkg/storage/filesystem/interfaces.go @@ -42,8 +42,8 @@ type FileFinder interface { // In order to support a create operation, this function must also return a valid path for // files that do not yet exist on disk. ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) - // ObjectAt retrieves the ID based on the given relative file path to fs. - ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) + // ObjectsAt retrieves the ObjectIDs in the file with the given relative file path. + ObjectsAt(ctx context.Context, path string) (core.UnversionedObjectIDSet, error) // The FileFinder should be able to list namespaces and Object IDs storage.Lister } diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index bd96fedc..0289e2dd 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -265,9 +265,9 @@ func (s *Generic) handleDelete(ctx context.Context, ev *fileevents.FileEvent) er // the known objects in such a way that it is able to do the reverse-lookup. For // mapped FileFinders, by this point the path should still be in the local cache, // which should make us able to get the ID before deleted from the cache. 
- objectID, err := s.UnstructuredFileFinder().ObjectAt(ctx, ev.Path) + objectID, err := unstructured.SingleObjectAt(ctx, s.UnstructuredFileFinder(), ev.Path) if err != nil { - return fmt.Errorf("failed to reverse lookup ID for deleted file %q: %v", ev.Path, err) + return fmt.Errorf("failed to reverse lookup ID for deleted file %q: %w", ev.Path, err) } // Remove the mapping from the FileFinder cache for this ID as it's now deleted diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go index e8b929f5..6075aaa2 100644 --- a/pkg/storage/filesystem/unstructured/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -3,6 +3,7 @@ package unstructured import ( "context" "errors" + "sync" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" @@ -30,9 +31,11 @@ func NewGenericFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.Fi } return &GenericFileFinder{ contentTyper: contentTyper, + fs: fs, // TODO: Support multiple branches - branch: &branchImpl{}, - fs: fs, + branch: &branchImpl{}, + pathToIDs: make(map[string]core.UnversionedObjectIDSet), + mu: &sync.RWMutex{}, } } @@ -50,7 +53,10 @@ type GenericFileFinder struct { contentTyper filesystem.ContentTyper fs filesystem.Filesystem - branch branch + branch branch + pathToIDs map[string]core.UnversionedObjectIDSet + // mu guards branch and pathToIDs + mu *sync.RWMutex } func (f *GenericFileFinder) Filesystem() filesystem.Filesystem { @@ -71,22 +77,18 @@ func (f *GenericFileFinder) ObjectPath(ctx context.Context, id core.UnversionedO return cp.Path, nil } -// ObjectAt retrieves the ID containing the virtual path based -// on the given physical file path. -func (f *GenericFileFinder) ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) { - // TODO: Add reverse tracking too? 
- for gk, gkIter := range f.branch.raw() { - for ns, nsIter := range gkIter.raw() { - for name, cp := range nsIter.raw() { - if cp.Path == path { - return core.NewUnversionedObjectID(gk, core.ObjectKey{Name: name, Namespace: ns}), nil - } - } - } +// ObjectsAt retrieves the ObjectIDs in the file with the given relative file path. +func (f *GenericFileFinder) ObjectsAt(ctx context.Context, path string) (core.UnversionedObjectIDSet, error) { + f.mu.RLock() + defer f.mu.RUnlock() + // TODO: This needs to be per-branch too + ids, ok := f.pathToIDs[path] + if !ok { + // TODO: Support "creation" of Objects easier, in a generic way through an interface, e.g. + // NewObjectPlacer? + return nil, ErrNotTracked } - // TODO: Support "creation" of Objects easier, in a generic way through an interface, e.g. - // NewObjectPlacer? - return nil, ErrNotTracked + return ids, nil } // ListNamespaces lists the available namespaces for the given GroupKind. @@ -100,6 +102,9 @@ func (f *GenericFileFinder) ObjectAt(ctx context.Context, path string) (core.Unv // different namespaces that have been set on any object belonging to // the given GroupKind. func (f *GenericFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { + f.mu.RLock() + defer f.mu.RUnlock() + m := f.branch.groupKind(gk).raw() nsSet := sets.NewString() for ns := range m { @@ -114,6 +119,9 @@ func (f *GenericFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKin // must only return object IDs for that given namespace. If any of the given // rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. 
func (f *GenericFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) { + f.mu.RLock() + defer f.mu.RUnlock() + m := f.branch.groupKind(gk).namespace(namespace).raw() ids := make([]core.UnversionedObjectID, 0, len(m)) for name := range m { @@ -124,6 +132,13 @@ func (f *GenericFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind // GetMapping retrieves a mapping in the system func (f *GenericFileFinder) GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) { + f.mu.RLock() + defer f.mu.RUnlock() + return f.getMapping(ctx, id) +} + +// getMapping is like GetMapping; but without a read lock; for internal operations +func (f *GenericFileFinder) getMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) { cp, ok := f.branch. groupKind(id.GroupKind()). namespace(id.ObjectKey().Namespace). @@ -133,10 +148,21 @@ func (f *GenericFileFinder) GetMapping(ctx context.Context, id core.UnversionedO // SetMapping binds an ID's virtual path to a physical file path func (f *GenericFileFinder) SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) { + f.mu.Lock() + defer f.mu.Unlock() + f.branch. groupKind(id.GroupKind()). namespace(id.ObjectKey().Namespace). 
setName(id.ObjectKey().Name, checksumPath) + + // Create the mapping between the path and a set of IDs if it doesn't exist + _, ok := f.pathToIDs[checksumPath.Path] + if !ok { + f.pathToIDs[checksumPath.Path] = core.NewUnversionedObjectIDSet() + } + // Register the ID with the given path + f.pathToIDs[checksumPath.Path].Insert(id) } // ResetMappings replaces all mappings at once @@ -150,8 +176,23 @@ func (f *GenericFileFinder) ResetMappings(ctx context.Context, m map[core.Unvers // DeleteMapping removes the physical file path mapping // matching the given id func (f *GenericFileFinder) DeleteMapping(ctx context.Context, id core.UnversionedObjectID) { + f.mu.Lock() + defer f.mu.Unlock() + + cp, ok := f.getMapping(ctx, id) + if !ok { + // Nothing to delete if it doesn't exist yet + return + } + // Delete it from the cache f.branch. groupKind(id.GroupKind()). namespace(id.ObjectKey().Namespace). deleteName(id.ObjectKey().Name) + // Delete the related ID from the path mapping too + f.pathToIDs[cp.Path].Delete(id) + // If the length of the set was shrunk to zero; delete it from the map completely + if f.pathToIDs[cp.Path].Len() == 0 { + delete(f.pathToIDs, cp.Path) + } } diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go index 5f10a4c8..5f30d9a4 100644 --- a/pkg/storage/filesystem/unstructured/storage.go +++ b/pkg/storage/filesystem/unstructured/storage.go @@ -11,6 +11,9 @@ import ( "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) +// ErrOnlySingleFrameSupported tells that only single frame-files are supported so far for the unstructured Storage. 
+var ErrOnlySingleFrameSupported = errors.New("file contains multiple Objects; for now only single-frame files are supported") + func NewGeneric(storage filesystem.Storage, recognizer ObjectRecognizer, pathExcluder filesystem.PathExcluder) (Storage, error) { if storage == nil { return nil, fmt.Errorf("storage is mandatory") @@ -67,7 +70,7 @@ func (s *Generic) Sync(ctx context.Context) ([]ChecksumPathID, error) { // If the given file already is tracked; i.e. has a mapping with a // non-empty checksum, and the current checksum matches, we do not // need to do anything. - if id, err := fileFinder.ObjectAt(ctx, filePath); err == nil { + if id, err := SingleObjectAt(ctx, fileFinder, filePath); err == nil { if cp, ok := fileFinder.GetMapping(ctx, id); ok && len(cp.Checksum) != 0 { if cp.Checksum == currentChecksum { logrus.Tracef("Checksum for file %q is up-to-date: %q, skipping...", filePath, cp.Checksum) @@ -150,8 +153,22 @@ func ReadAndRecognizeFile( // For now; we only support single-frame files // TODO: Change this. if ids.Len() != 1 { - return nil, fmt.Errorf("File %q contained multiple objects; for now only single-frame files are supported", filePath) + return nil, fmt.Errorf("%w: %q", ErrOnlySingleFrameSupported, filePath) } // Return that one ID return ids.List()[0], nil } + +func SingleObjectAt(ctx context.Context, fileFinder filesystem.FileFinder, filePath string) (core.UnversionedObjectID, error) { + idSet, err := fileFinder.ObjectsAt(ctx, filePath) + if err != nil { + return nil, err + } + // For now; we only support single-frame files + // TODO: Change this. 
+ if idSet.Len() != 1 { + return nil, fmt.Errorf("%w: %q", ErrOnlySingleFrameSupported, filePath) + } + // Return that one ID + return idSet.List()[0], nil +} From 24d1b72ca53f3053ee4c44005cfd9a6e90878261 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 2 Feb 2021 00:52:13 +0200 Subject: [PATCH 110/149] Minor code move: Move all Object-specific code from core to the client and backend, respectively. --- pkg/storage/backend/backend.go | 32 +++++----- pkg/storage/backend/enforcer.go | 4 +- pkg/storage/client/client.go | 62 +++++-------------- pkg/storage/client/interfaces.go | 50 +++++++++++++++ pkg/storage/client/options.go | 28 ++++----- pkg/storage/client/transactional/client.go | 4 +- .../transactional/distributed/client.go | 5 +- .../client/transactional/interfaces.go | 36 +++++------ pkg/storage/client/transactional/tx_ops.go | 37 +++++------ pkg/storage/client/utils.go | 4 +- pkg/storage/core/interfaces.go | 32 ++++------ .../filesystem/unstructured/recognizer.go | 2 +- 12 files changed, 156 insertions(+), 140 deletions(-) create mode 100644 pkg/storage/client/interfaces.go diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 97c587bb..8bcaae7b 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -11,6 +11,7 @@ import ( "github.com/weaveworks/libgitops/pkg/storage/core" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" ) var ( @@ -29,6 +30,9 @@ var ( // 5. Status { Data []byte, ContentType ContentType, Object interface{} } // TODO: Need to make sure we never write this internal struct to disk (MarshalJSON error?) 
+// Create an alias for the Object type +type Object = client.Object + type Accessors interface { Storage() storage.Storage NamespaceEnforcer() NamespaceEnforcer @@ -44,7 +48,7 @@ type WriteAccessors interface { type Reader interface { Accessors - Get(ctx context.Context, obj core.Object) error + Get(ctx context.Context, obj Object) error storage.Lister } @@ -52,16 +56,16 @@ type Writer interface { Accessors WriteAccessors - Create(ctx context.Context, obj core.Object) error - Update(ctx context.Context, obj core.Object) error - Delete(ctx context.Context, obj core.Object) error + Create(ctx context.Context, obj Object) error + Update(ctx context.Context, obj Object) error + Delete(ctx context.Context, obj Object) error } type StatusWriter interface { Accessors WriteAccessors - UpdateStatus(ctx context.Context, obj core.Object) error + UpdateStatus(ctx context.Context, obj Object) error } type Backend interface { @@ -79,7 +83,7 @@ const ( ) type Validator interface { - ValidateChange(ctx context.Context, backend Reader, op ChangeOperation, obj core.Object) error + ValidateChange(ctx context.Context, backend Reader, op ChangeOperation, obj Object) error } // NewGeneric creates a new generic Backend for the given underlying Storage for storing the @@ -161,7 +165,7 @@ func (b *Generic) StorageVersioner() StorageVersioner { return b.versioner } -func (b *Generic) Get(ctx context.Context, obj core.Object) error { +func (b *Generic) Get(ctx context.Context, obj Object) error { // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. 
id, err := b.idForObj(ctx, obj) if err != nil { @@ -199,7 +203,7 @@ func (b *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespac return b.storage.ListObjectIDs(ctx, gk, namespace) } -func (b *Generic) Create(ctx context.Context, obj core.Object) error { +func (b *Generic) Create(ctx context.Context, obj Object) error { // We must never save metadata-only structs if serializer.IsPartialObject(obj) { return ErrCannotSaveMetadata @@ -227,7 +231,7 @@ func (b *Generic) Create(ctx context.Context, obj core.Object) error { // Internal, common write shared with Update() return b.write(ctx, id, obj) } -func (b *Generic) Update(ctx context.Context, obj core.Object) error { +func (b *Generic) Update(ctx context.Context, obj Object) error { // We must never save metadata-only structs if serializer.IsPartialObject(obj) { return ErrCannotSaveMetadata @@ -256,11 +260,11 @@ func (b *Generic) Update(ctx context.Context, obj core.Object) error { return b.write(ctx, id, obj) } -func (b *Generic) UpdateStatus(ctx context.Context, obj core.Object) error { +func (b *Generic) UpdateStatus(ctx context.Context, obj Object) error { return core.ErrNotImplemented // TODO } -func (b *Generic) write(ctx context.Context, id core.ObjectID, obj core.Object) error { +func (b *Generic) write(ctx context.Context, id core.ObjectID, obj Object) error { // Get the content type of the object ct, err := b.storage.ContentType(ctx, id) if err != nil { @@ -289,7 +293,7 @@ func (b *Generic) write(ctx context.Context, id core.ObjectID, obj core.Object) return b.storage.Write(ctx, id, objBytes.Bytes()) } -func (b *Generic) Delete(ctx context.Context, obj core.Object) error { +func (b *Generic) Delete(ctx context.Context, obj Object) error { // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. 
id, err := b.idForObj(ctx, obj) if err != nil { @@ -314,7 +318,7 @@ func (b *Generic) Delete(ctx context.Context, obj core.Object) error { } // Note: This should also work for unstructured and partial metadata objects -func (b *Generic) idForObj(ctx context.Context, obj core.Object) (core.ObjectID, error) { +func (b *Generic) idForObj(ctx context.Context, obj Object) (core.ObjectID, error) { gvk, err := serializer.GVKForObject(b.Scheme(), obj) if err != nil { return nil, err @@ -339,5 +343,5 @@ func (b *Generic) idForObj(ctx context.Context, obj core.Object) (core.ObjectID, // At this point we know name is non-empty, and the namespace field is correct, // according to policy - return core.NewObjectID(gvk, core.ObjectKeyFromObject(obj)), nil + return core.NewObjectID(gvk, core.ObjectKeyFromMetav1Object(obj)), nil } diff --git a/pkg/storage/backend/enforcer.go b/pkg/storage/backend/enforcer.go index 95869bc0..91ba2deb 100644 --- a/pkg/storage/backend/enforcer.go +++ b/pkg/storage/backend/enforcer.go @@ -30,7 +30,7 @@ type NamespaceEnforcer interface { // // See GenericNamespaceEnforcer for an example implementation, or // pkg/storage/kube.NewNamespaceEnforcer() for a sample application. 
- EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer storage.Namespacer, lister storage.Lister) error + EnforceNamespace(ctx context.Context, obj Object, gvk core.GroupVersionKind, namespacer storage.Namespacer, lister storage.Lister) error } // GenericNamespaceEnforcer is a NamespaceEnforcer that: @@ -61,7 +61,7 @@ type GenericNamespaceEnforcer struct { NamespaceGroupKind *core.GroupKind } -func (e GenericNamespaceEnforcer) EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer storage.Namespacer, lister storage.Lister) error { +func (e GenericNamespaceEnforcer) EnforceNamespace(ctx context.Context, obj Object, gvk core.GroupVersionKind, namespacer storage.Namespacer, lister storage.Lister) error { // Get namespacing info namespaced, err := namespacer.IsNamespaced(gvk.GroupKind()) if err != nil { diff --git a/pkg/storage/client/client.go b/pkg/storage/client/client.go index 61ff4584..7bb4e1dd 100644 --- a/pkg/storage/client/client.go +++ b/pkg/storage/client/client.go @@ -17,41 +17,11 @@ import ( kruntime "k8s.io/apimachinery/pkg/runtime" utilerrs "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" - "sigs.k8s.io/controller-runtime/pkg/client" ) // TODO: Pass an ObjectID that contains all PartialObjectMetadata info for "downstream" consumers // that can make use of it by "casting up". 
-var ( - // ErrUnsupportedPatchType is returned when an unsupported patch type is used - ErrUnsupportedPatchType = errors.New("unsupported patch type") -) - -type Reader interface { - client.Reader - BackendReader() backend.Reader -} - -type Writer interface { - client.Writer - BackendWriter() backend.Writer -} - -type StatusClient interface { - client.StatusClient - BackendStatusWriter() backend.StatusWriter -} - -// Client is an interface for persisting and retrieving API objects to/from a backend -// One Client instance handles all different Kinds of Objects -type Client interface { - Reader - Writer - // TODO: StatusClient - //client.Client -} - // NewGeneric constructs a new Generic client // TODO: Construct the default patcher from the given scheme, make patcher an opt instead func NewGeneric(backend backend.Backend) (*Generic, error) { @@ -75,7 +45,7 @@ func (c *Generic) BackendWriter() backend.Writer { return c.backend } // Get returns a new Object for the resource at the specified kind/uid path, based on the file content. // In order to only extract the metadata of this object, pass in a *metav1.PartialObjectMetadata -func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { +func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj Object) error { obj.SetName(key.Name) obj.SetNamespace(key.Namespace) @@ -90,14 +60,14 @@ func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) // you need to populate TypeMeta with the GVK you want back. // TODO: Check if this works with metav1.List{} // TODO: Create constructors for the different kinds of lists? -func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...client.ListOption) error { +func (c *Generic) List(ctx context.Context, list ObjectList, opts ...ListOption) error { // This call will verify that list actually is a List type. 
gvk, err := serializer.GVKForList(list, c.Scheme()) if err != nil { return err } // This applies both upstream and custom options - listOpts := (&ListOptions{}).ApplyOptions(opts) + listOpts := (&ExtendedListOptions{}).ApplyOptions(opts) // Get namespacing info gk := gvk.GroupKind() @@ -130,7 +100,7 @@ func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...client } // Populate objs through the given (non-buffered) channel - ch := make(chan core.Object) + ch := make(chan Object) objs := make([]kruntime.Object, 0, allIDs.Len()) // How should the object be created? @@ -157,16 +127,16 @@ func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...client return meta.SetList(list, objs) } -func (c *Generic) Create(ctx context.Context, obj core.Object, _ ...client.CreateOption) error { +func (c *Generic) Create(ctx context.Context, obj Object, _ ...CreateOption) error { return c.backend.Create(ctx, obj) } -func (c *Generic) Update(ctx context.Context, obj core.Object, _ ...client.UpdateOption) error { +func (c *Generic) Update(ctx context.Context, obj Object, _ ...UpdateOption) error { return c.backend.Update(ctx, obj) } // Patch performs a strategic merge patch on the object with the given UID, using the byte-encoded patch given -func (c *Generic) Patch(ctx context.Context, obj core.Object, patch core.Patch, _ ...client.PatchOption) error { +func (c *Generic) Patch(ctx context.Context, obj Object, patch Patch, _ ...PatchOption) error { // Fail-fast: We must never save metadata-only structs if serializer.IsPartialObject(obj) { return backend.ErrCannotSaveMetadata @@ -210,16 +180,16 @@ func (c *Generic) Patch(ctx context.Context, obj core.Object, patch core.Patch, // Delete removes an Object from the backend // PartialObjectMetadata should work here. 
-func (c *Generic) Delete(ctx context.Context, obj core.Object, _ ...client.DeleteOption) error { +func (c *Generic) Delete(ctx context.Context, obj Object, _ ...DeleteOption) error { return c.backend.Delete(ctx, obj) } // DeleteAllOf deletes all matched resources by first doing a List() operation on the given GVK of // obj (obj is not used for anything else) and the given filters in opts. Only the Partial Meta -func (c *Generic) DeleteAllOf(ctx context.Context, obj core.Object, opts ...client.DeleteAllOfOption) error { +func (c *Generic) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { // This applies both upstream and custom options, and propagates the options correctly to both // List() and Delete() - customDeleteAllOpts := (&DeleteAllOfOptions{}).ApplyOptions(opts) + customDeleteAllOpts := (&ExtendedDeleteAllOfOptions{}).ApplyOptions(opts) // Get the GVK of the object gvk, err := serializer.GVKForObject(c.Scheme(), obj) @@ -254,16 +224,16 @@ func (c *Generic) RESTMapper() meta.RESTMapper { return nil } -type newObjectFunc func() (core.Object, error) +type newObjectFunc func() (Object, error) func createObject(gvk core.GroupVersionKind, scheme *kruntime.Scheme) newObjectFunc { - return func() (core.Object, error) { + return func() (Object, error) { return NewObjectForGVK(gvk, scheme) } } func createPartialObject(gvk core.GroupVersionKind) newObjectFunc { - return func() (core.Object, error) { + return func() (Object, error) { obj := &metav1.PartialObjectMetadata{} obj.SetGroupVersionKind(gvk) return obj, nil @@ -271,14 +241,14 @@ func createPartialObject(gvk core.GroupVersionKind) newObjectFunc { } func createUnstructuredObject(gvk core.GroupVersionKind) newObjectFunc { - return func() (core.Object, error) { + return func() (Object, error) { obj := &unstructured.Unstructured{} obj.SetGroupVersionKind(gvk) return obj, nil } } -func (c *Generic) processKeys(ctx context.Context, ids core.UnversionedObjectIDSet, filterOpts 
*filter.FilterOptions, fn newObjectFunc, output chan core.Object) error { +func (c *Generic) processKeys(ctx context.Context, ids core.UnversionedObjectIDSet, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan Object) error { goroutines := []func() error{} for _, id := range ids.List() { goroutines = append(goroutines, c.processKey(ctx, id, filterOpts, fn, output)) @@ -289,7 +259,7 @@ func (c *Generic) processKeys(ctx context.Context, ids core.UnversionedObjectIDS return utilerrs.AggregateGoroutines(goroutines...) } -func (c *Generic) processKey(ctx context.Context, id core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) func() error { +func (c *Generic) processKey(ctx context.Context, id core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan Object) func() error { return func() error { // Create a new object, and decode into it using Get obj, err := fn() diff --git a/pkg/storage/client/interfaces.go b/pkg/storage/client/interfaces.go new file mode 100644 index 00000000..dc80e78f --- /dev/null +++ b/pkg/storage/client/interfaces.go @@ -0,0 +1,50 @@ +package client + +import ( + "errors" + + "github.com/weaveworks/libgitops/pkg/storage/backend" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// Client-related Object aliases +type Object = client.Object +type ObjectList = client.ObjectList +type Patch = client.Patch + +// Client-related Option aliases +type ListOption = client.ListOption +type CreateOption = client.CreateOption +type UpdateOption = client.UpdateOption +type PatchOption = client.PatchOption +type DeleteOption = client.DeleteOption +type DeleteAllOfOption = client.DeleteAllOfOption + +var ( + // ErrUnsupportedPatchType is returned when an unsupported patch type is used + ErrUnsupportedPatchType = errors.New("unsupported patch type") +) + +type Reader interface { + client.Reader + BackendReader() backend.Reader +} + +type Writer interface { + 
client.Writer + BackendWriter() backend.Writer +} + +type StatusClient interface { + client.StatusClient + BackendStatusWriter() backend.StatusWriter +} + +// Client is an interface for persisting and retrieving API objects to/from a backend +// One Client instance handles all different Kinds of Objects +type Client interface { + Reader + Writer + // TODO: StatusClient + //client.Client +} diff --git a/pkg/storage/client/options.go b/pkg/storage/client/options.go index 7fa8f8ed..08ec9f0a 100644 --- a/pkg/storage/client/options.go +++ b/pkg/storage/client/options.go @@ -5,27 +5,27 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -type ListOption interface { +type ExtendedListOption interface { client.ListOption filter.FilterOption } -type ListOptions struct { +type ExtendedListOptions struct { client.ListOptions filter.FilterOptions } -var _ ListOption = &ListOptions{} +var _ ExtendedListOption = &ExtendedListOptions{} -func (o *ListOptions) ApplyToList(target *client.ListOptions) { +func (o *ExtendedListOptions) ApplyToList(target *client.ListOptions) { o.ListOptions.ApplyToList(target) } -func (o *ListOptions) ApplyToFilterOptions(target *filter.FilterOptions) { +func (o *ExtendedListOptions) ApplyToFilterOptions(target *filter.FilterOptions) { o.FilterOptions.ApplyToFilterOptions(target) } -func (o *ListOptions) ApplyOptions(opts []client.ListOption) *ListOptions { +func (o *ExtendedListOptions) ApplyOptions(opts []client.ListOption) *ExtendedListOptions { // Apply the "normal" ListOptions o.ListOptions.ApplyOptions(opts) // Apply all FilterOptions, if they implement that interface @@ -45,26 +45,26 @@ func (o *ListOptions) ApplyOptions(opts []client.ListOption) *ListOptions { return o } -type DeleteAllOfOption interface { - ListOption +type ExtendedDeleteAllOfOption interface { + ExtendedListOption client.DeleteAllOfOption } -type DeleteAllOfOptions struct { - ListOptions +type ExtendedDeleteAllOfOptions struct { + ExtendedListOptions 
client.DeleteOptions } -var _ DeleteAllOfOption = &DeleteAllOfOptions{} +var _ ExtendedDeleteAllOfOption = &ExtendedDeleteAllOfOptions{} -func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(target *client.DeleteAllOfOptions) { +func (o *ExtendedDeleteAllOfOptions) ApplyToDeleteAllOf(target *client.DeleteAllOfOptions) { o.DeleteOptions.ApplyToDelete(&target.DeleteOptions) } -func (o *DeleteAllOfOptions) ApplyOptions(opts []client.DeleteAllOfOption) *DeleteAllOfOptions { +func (o *ExtendedDeleteAllOfOptions) ApplyOptions(opts []client.DeleteAllOfOption) *ExtendedDeleteAllOfOptions { // Cannot directly apply to o, hence, create a temporary object to which upstream opts are applied do := (&client.DeleteAllOfOptions{}).ApplyOptions(opts) - o.ListOptions.ListOptions = do.ListOptions + o.ExtendedListOptions.ListOptions = do.ListOptions o.DeleteOptions = do.DeleteOptions // Apply all FilterOptions, if they implement that interface diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index 9a615414..caff0c96 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -65,13 +65,13 @@ type txLock struct { active uint32 } -func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { +func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error { return c.lockForReading(ctx, func() error { return c.c.Get(ctx, key, obj) }) } -func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...core.ListOption) error { +func (c *Generic) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return c.lockForReading(ctx, func() error { return c.c.List(ctx, list, opts...) 
}) diff --git a/pkg/storage/client/transactional/distributed/client.go b/pkg/storage/client/transactional/distributed/client.go index a92e54a9..d0b0893a 100644 --- a/pkg/storage/client/transactional/distributed/client.go +++ b/pkg/storage/client/transactional/distributed/client.go @@ -7,6 +7,7 @@ import ( "time" "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/storage/client" "github.com/weaveworks/libgitops/pkg/storage/client/transactional" "github.com/weaveworks/libgitops/pkg/storage/core" "k8s.io/apimachinery/pkg/util/wait" @@ -57,13 +58,13 @@ type branchLock struct { lastPull time.Time } -func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { +func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error { return c.readWhenPossible(ctx, func() error { return c.Client.Get(ctx, key, obj) }) } -func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...core.ListOption) error { +func (c *Generic) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { return c.readWhenPossible(ctx, func() error { return c.Client.List(ctx, list, opts...) 
}) diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go index dac57cc6..3485194d 100644 --- a/pkg/storage/client/transactional/interfaces.go +++ b/pkg/storage/client/transactional/interfaces.go @@ -53,17 +53,17 @@ type Tx interface { Custom(CustomTxFunc) Tx - Get(key core.ObjectKey, obj core.Object) Tx - List(list core.ObjectList, opts ...core.ListOption) Tx + Get(key core.ObjectKey, obj client.Object) Tx + List(list client.ObjectList, opts ...client.ListOption) Tx - Create(obj core.Object, opts ...core.CreateOption) Tx - Update(obj core.Object, opts ...core.UpdateOption) Tx - Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx - Delete(obj core.Object, opts ...core.DeleteOption) Tx - DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) Tx + Create(obj client.Object, opts ...client.CreateOption) Tx + Update(obj client.Object, opts ...client.UpdateOption) Tx + Patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) Tx + Delete(obj client.Object, opts ...client.DeleteOption) Tx + DeleteAllOf(obj client.Object, opts ...client.DeleteAllOfOption) Tx - UpdateStatus(obj core.Object, opts ...core.UpdateOption) Tx - PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx + UpdateStatus(obj client.Object, opts ...client.UpdateOption) Tx + PatchStatus(obj client.Object, patch client.Patch, opts ...client.PatchOption) Tx } type BranchTx interface { @@ -74,17 +74,17 @@ type BranchTx interface { Custom(CustomTxFunc) BranchTx - Get(key core.ObjectKey, obj core.Object) BranchTx - List(list core.ObjectList, opts ...core.ListOption) BranchTx + Get(key core.ObjectKey, obj client.Object) BranchTx + List(list client.ObjectList, opts ...client.ListOption) BranchTx - Create(obj core.Object, opts ...core.CreateOption) BranchTx - Update(obj core.Object, opts ...core.UpdateOption) BranchTx - Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx - 
Delete(obj core.Object, opts ...core.DeleteOption) BranchTx - DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) BranchTx + Create(obj client.Object, opts ...client.CreateOption) BranchTx + Update(obj client.Object, opts ...client.UpdateOption) BranchTx + Patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) BranchTx + Delete(obj client.Object, opts ...client.DeleteOption) BranchTx + DeleteAllOf(obj client.Object, opts ...client.DeleteAllOfOption) BranchTx - UpdateStatus(obj core.Object, opts ...core.UpdateOption) BranchTx - PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx + UpdateStatus(obj client.Object, opts ...client.UpdateOption) BranchTx + PatchStatus(obj client.Object, patch client.Patch, opts ...client.PatchOption) BranchTx } type BranchTxResult interface { diff --git a/pkg/storage/client/transactional/tx_ops.go b/pkg/storage/client/transactional/tx_ops.go index de066d3c..766d0fd4 100644 --- a/pkg/storage/client/transactional/tx_ops.go +++ b/pkg/storage/client/transactional/tx_ops.go @@ -3,55 +3,56 @@ package transactional import ( "context" + "github.com/weaveworks/libgitops/pkg/storage/client" "github.com/weaveworks/libgitops/pkg/storage/core" ) // Implement the required "fluent/functional" methods on BranchTx. // Go doesn't have generics; hence we need to do this twice. -func (tx *txImpl) Get(key core.ObjectKey, obj core.Object) Tx { +func (tx *txImpl) Get(key core.ObjectKey, obj client.Object) Tx { return tx.Custom(func(ctx context.Context) error { return tx.c.Get(ctx, key, obj) }) } -func (tx *txImpl) List(list core.ObjectList, opts ...core.ListOption) Tx { +func (tx *txImpl) List(list client.ObjectList, opts ...client.ListOption) Tx { return tx.Custom(func(ctx context.Context) error { return tx.c.List(ctx, list, opts...) 
}) } -func (tx *txImpl) Create(obj core.Object, opts ...core.CreateOption) Tx { +func (tx *txImpl) Create(obj client.Object, opts ...client.CreateOption) Tx { return tx.Custom(func(ctx context.Context) error { return tx.c.Create(ctx, obj, opts...) }) } -func (tx *txImpl) Update(obj core.Object, opts ...core.UpdateOption) Tx { +func (tx *txImpl) Update(obj client.Object, opts ...client.UpdateOption) Tx { return tx.Custom(func(ctx context.Context) error { return tx.c.Update(ctx, obj, opts...) }) } -func (tx *txImpl) Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx { +func (tx *txImpl) Patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) Tx { return tx.Custom(func(ctx context.Context) error { return tx.c.Patch(ctx, obj, patch, opts...) }) } -func (tx *txImpl) Delete(obj core.Object, opts ...core.DeleteOption) Tx { +func (tx *txImpl) Delete(obj client.Object, opts ...client.DeleteOption) Tx { return tx.Custom(func(ctx context.Context) error { return tx.c.Delete(ctx, obj, opts...) }) } -func (tx *txImpl) DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) Tx { +func (tx *txImpl) DeleteAllOf(obj client.Object, opts ...client.DeleteAllOfOption) Tx { return tx.Custom(func(ctx context.Context) error { return tx.c.DeleteAllOf(ctx, obj, opts...) }) } -func (tx *txImpl) UpdateStatus(obj core.Object, opts ...core.UpdateOption) Tx { +func (tx *txImpl) UpdateStatus(obj client.Object, opts ...client.UpdateOption) Tx { return tx.Custom(func(ctx context.Context) error { return nil // TODO tx.c.Status().Update(ctx, obj, opts...) }) } -func (tx *txImpl) PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx { +func (tx *txImpl) PatchStatus(obj client.Object, patch client.Patch, opts ...client.PatchOption) Tx { return tx.Custom(func(ctx context.Context) error { return nil // TODO tx.c.Status().Patch(ctx, obj, patch, opts...) 
}) @@ -60,49 +61,49 @@ func (tx *txImpl) PatchStatus(obj core.Object, patch core.Patch, opts ...core.Pa // Implement the required "fluent/functional" methods on BranchTx. // Go doesn't have generics; hence we need to do this twice. -func (tx *txBranchImpl) Get(key core.ObjectKey, obj core.Object) BranchTx { +func (tx *txBranchImpl) Get(key core.ObjectKey, obj client.Object) BranchTx { return tx.Custom(func(ctx context.Context) error { return tx.c.Get(ctx, key, obj) }) } -func (tx *txBranchImpl) List(list core.ObjectList, opts ...core.ListOption) BranchTx { +func (tx *txBranchImpl) List(list client.ObjectList, opts ...client.ListOption) BranchTx { return tx.Custom(func(ctx context.Context) error { return tx.c.List(ctx, list, opts...) }) } -func (tx *txBranchImpl) Create(obj core.Object, opts ...core.CreateOption) BranchTx { +func (tx *txBranchImpl) Create(obj client.Object, opts ...client.CreateOption) BranchTx { return tx.Custom(func(ctx context.Context) error { return tx.c.Create(ctx, obj, opts...) }) } -func (tx *txBranchImpl) Update(obj core.Object, opts ...core.UpdateOption) BranchTx { +func (tx *txBranchImpl) Update(obj client.Object, opts ...client.UpdateOption) BranchTx { return tx.Custom(func(ctx context.Context) error { return tx.c.Update(ctx, obj, opts...) }) } -func (tx *txBranchImpl) Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx { +func (tx *txBranchImpl) Patch(obj client.Object, patch client.Patch, opts ...client.PatchOption) BranchTx { return tx.Custom(func(ctx context.Context) error { return tx.c.Patch(ctx, obj, patch, opts...) }) } -func (tx *txBranchImpl) Delete(obj core.Object, opts ...core.DeleteOption) BranchTx { +func (tx *txBranchImpl) Delete(obj client.Object, opts ...client.DeleteOption) BranchTx { return tx.Custom(func(ctx context.Context) error { return tx.c.Delete(ctx, obj, opts...) 
}) } -func (tx *txBranchImpl) DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) BranchTx { +func (tx *txBranchImpl) DeleteAllOf(obj client.Object, opts ...client.DeleteAllOfOption) BranchTx { return tx.Custom(func(ctx context.Context) error { return tx.c.DeleteAllOf(ctx, obj, opts...) }) } -func (tx *txBranchImpl) UpdateStatus(obj core.Object, opts ...core.UpdateOption) BranchTx { +func (tx *txBranchImpl) UpdateStatus(obj client.Object, opts ...client.UpdateOption) BranchTx { return tx.Custom(func(ctx context.Context) error { return nil // TODO tx.c.Status().Update(ctx, obj, opts...) }) } -func (tx *txBranchImpl) PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx { +func (tx *txBranchImpl) PatchStatus(obj client.Object, patch client.Patch, opts ...client.PatchOption) BranchTx { return tx.Custom(func(ctx context.Context) error { return nil // TODO tx.c.Status().Patch(ctx, obj, patch, opts...) }) diff --git a/pkg/storage/client/utils.go b/pkg/storage/client/utils.go index da869085..fb8c79a5 100644 --- a/pkg/storage/client/utils.go +++ b/pkg/storage/client/utils.go @@ -10,12 +10,12 @@ import ( var ErrNoMetadata = errors.New("it is required to embed ObjectMeta into the serialized API type") -func NewObjectForGVK(gvk core.GroupVersionKind, scheme *runtime.Scheme) (core.Object, error) { +func NewObjectForGVK(gvk core.GroupVersionKind, scheme *runtime.Scheme) (Object, error) { kobj, err := scheme.New(gvk) if err != nil { return nil, err } - obj, ok := kobj.(core.Object) + obj, ok := kobj.(Object) if !ok { return nil, fmt.Errorf("%w: %s", ErrNoMetadata, gvk) } diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go index 546b7caf..8e93b87d 100644 --- a/pkg/storage/core/interfaces.go +++ b/pkg/storage/core/interfaces.go @@ -1,37 +1,27 @@ package core import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - 
"sigs.k8s.io/controller-runtime/pkg/client" ) -// Note: package core must not depend on any other parts of the libgitops repo, possibly the serializer package as an exception. -// Anything under k8s.io/apimachinery goes though, and important external imports -// like github.com/spf13/afero is also ok. The pretty large sigs.k8s.io/controller-runtime -// import is a bit sub-optimal, though. +// Note: package core must not depend on any other parts of the libgitops repo, only +// essentially anything under k8s.io/apimachinery is ok. -// GroupVersionKind aliases +// GroupVersionKind and ObjectID aliases type GroupKind = schema.GroupKind type GroupVersion = schema.GroupVersion type GroupVersionKind = schema.GroupVersionKind - -// Client-related Object aliases -type Object = client.Object type ObjectKey = types.NamespacedName -type ObjectList = client.ObjectList -type Patch = client.Patch - -// Client-related Option aliases -type ListOption = client.ListOption -type CreateOption = client.CreateOption -type UpdateOption = client.UpdateOption -type PatchOption = client.PatchOption -type DeleteOption = client.DeleteOption -type DeleteAllOfOption = client.DeleteAllOfOption -// Helper functions from client. -var ObjectKeyFromObject = client.ObjectKeyFromObject +// ObjectKeyFromObject returns the ObjectKey of a given metav1.Object. +func ObjectKeyFromMetav1Object(obj metav1.Object) ObjectKey { + return ObjectKey{ + Name: obj.GetName(), + Namespace: obj.GetNamespace(), + } +} // UnversionedObjectID represents an ID for an Object whose version is not known. 
// However, the Group, Kind, Name and optionally, Namespace is known and should diff --git a/pkg/storage/filesystem/unstructured/recognizer.go b/pkg/storage/filesystem/unstructured/recognizer.go index 68e9ce4d..b2893507 100644 --- a/pkg/storage/filesystem/unstructured/recognizer.go +++ b/pkg/storage/filesystem/unstructured/recognizer.go @@ -65,7 +65,7 @@ func (r KubeObjectRecognizer) RecognizeObjectIDs(_ string, fr serializer.FrameRe } // Create the ObjectID - id := core.NewObjectID(metaObj.GroupVersionKind(), core.ObjectKeyFromObject(metaObj)) + id := core.NewObjectID(metaObj.GroupVersionKind(), core.ObjectKeyFromMetav1Object(metaObj)) // Insert it into the set; but error if AllowDuplicates==false and it already existed. // Important: As InsertUnique mutates ids, it must be the first if case if !ids.InsertUnique(id) && !r.AllowDuplicates { From 43efc2ee262221d569607577e6ec2ed70916e9c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 2 Feb 2021 00:52:36 +0200 Subject: [PATCH 111/149] an idea for the future. --- pkg/serializer/decode.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go index dd496abb..af0b2c6b 100644 --- a/pkg/serializer/decode.go +++ b/pkg/serializer/decode.go @@ -139,6 +139,9 @@ func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runti // opts.DecodeUnknown is not applicable in this call. In case you want to decode an object into a // *runtime.Unknown, just create a runtime.Unknown object and pass the pointer as obj into DecodeInto // and it'll work. +// +// TODO: Support decoding all frames at once into e.g. PartialMetadataLists, UnstructuredLists, or +// metav1.Lists. func (d *decoder) DecodeInto(fr FrameReader, into runtime.Object) error { // Read a frame from the FrameReader. 
// TODO: Make sure to test the case when doc might contain something, and err is io.EOF From 413a6f4082c88fd4ed532023e2e014e216649737 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 01:20:28 +0200 Subject: [PATCH 112/149] Add an "OldPath" property of FileEvent; and populate that in FileWatcher. Small fixes to make moves within the repo to work properly (create a concatenated event) --- pkg/storage/filesystem/fileevents/events.go | 3 +++ .../fileevents/inotify/filewatcher.go | 27 ++++++++++++++++--- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/pkg/storage/filesystem/fileevents/events.go b/pkg/storage/filesystem/fileevents/events.go index 38c385aa..4c4e09a4 100644 --- a/pkg/storage/filesystem/fileevents/events.go +++ b/pkg/storage/filesystem/fileevents/events.go @@ -30,6 +30,9 @@ func (e FileEventType) String() string { type FileEvent struct { Path string Type FileEventType + + // OldPath is non-empty only when Type == FileEventMove. 
+ OldPath string } // FileEventStream is a channel of FileEvents diff --git a/pkg/storage/filesystem/fileevents/inotify/filewatcher.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go index 162820bd..a66f1467 100644 --- a/pkg/storage/filesystem/fileevents/inotify/filewatcher.go +++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go @@ -30,6 +30,10 @@ var combinedEvents = []combinedEvent{ {[]notify.Event{notify.InDelete, notify.InCloseWrite}, 1}, // MODIFY + DELETE => NONE {[]notify.Event{notify.InCloseWrite, notify.InDelete}, -1}, + // MOVE + MODIFY => MOVE + {[]notify.Event{notify.InMovedTo, notify.InCloseWrite}, 0}, + // MODIFY + MOVE => MOVE + {[]notify.Event{notify.InCloseWrite, notify.InMovedTo}, 1}, } type notifyEvents []notify.EventInfo @@ -193,12 +197,27 @@ func (w *FileWatcher) sendUpdate(event *fileevents.FileEvent) { // Replace the full path with the relative path for the signaling upstream event.Path = relativePath + if len(event.OldPath) != 0 { + // Do the same for event.OldPath + relativePath, err = filepath.Rel(w.dir, event.OldPath) + if err != nil { + logrus.Errorf("FileWatcher: Error occurred when computing relative path between: %s and %s: %v", w.dir, event.OldPath, err) + return + } + // Replace the full path with the relative path for the signaling upstream + event.OldPath = relativePath + } + if w.shouldSuspendEvent(event.Path) { log.Debugf("FileWatcher: Skipping suspended event %s for path: %q", event.Type, event.Path) return // Skip the suspended event } + if event.Type == fileevents.FileEventMove { + log.Debugf("FileWatcher: Sending update: %s: %q -> %q", event.Type, event.OldPath, event.Path) + } else { + log.Debugf("FileWatcher: Sending update: %s -> %q", event.Type, event.Path) + } - log.Debugf("FileWatcher: Sending update: %s -> %q", event.Type, event.Path) w.outbound <- event } @@ -342,8 +361,8 @@ func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *fileevents.FileE sourcePath, destPath = destPath, 
sourcePath fallthrough case notify.InMovedTo: - cache.cancel() // Cancel dispatching the cache's incomplete move - moveUpdate = &fileevents.FileEvent{Path: destPath, Type: fileevents.FileEventMove} // Register an internal, complete move instead + cache.cancel() // Cancel dispatching the cache's incomplete move + moveUpdate = &fileevents.FileEvent{Path: destPath, OldPath: sourcePath, Type: fileevents.FileEventMove} // Register an internal, complete move instead log.Tracef("FileWatcher: Detected move: %q -> %q", sourcePath, destPath) } @@ -407,7 +426,7 @@ func (c *combinedEvent) match(events notifyEvents) (notify.EventInfo, bool) { } } - if c.output > 0 { + if c.output >= 0 { return events[c.output], true } From d2c36872d543a64b35e02870d81226660f8ee6e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 01:21:34 +0200 Subject: [PATCH 113/149] Rename SchemeLock -> GetLockedScheme to avoid func naming mismatches --- pkg/serializer/convertor.go | 2 +- pkg/serializer/decode.go | 2 +- pkg/serializer/defaulter.go | 2 +- pkg/serializer/encode.go | 2 +- pkg/serializer/patch.go | 2 +- pkg/serializer/serializer.go | 12 ++++++------ pkg/storage/backend/backend.go | 2 +- pkg/storage/client/client.go | 2 +- 8 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/serializer/convertor.go b/pkg/serializer/convertor.go index f6368306..d51efd30 100644 --- a/pkg/serializer/convertor.go +++ b/pkg/serializer/convertor.go @@ -34,7 +34,7 @@ type converter struct { convertor *objectConvertor } -func (c *converter) SchemeLock() LockedScheme { +func (c *converter) GetLockedScheme() LockedScheme { return c.LockedScheme } diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go index af0b2c6b..7f6cf116 100644 --- a/pkg/serializer/decode.go +++ b/pkg/serializer/decode.go @@ -42,7 +42,7 @@ type decoder struct { opts DecodeOptions } -func (d *decoder) SchemeLock() LockedScheme { +func (d *decoder) GetLockedScheme() LockedScheme { return 
d.LockedScheme } diff --git a/pkg/serializer/defaulter.go b/pkg/serializer/defaulter.go index e94093cd..2b96ab44 100644 --- a/pkg/serializer/defaulter.go +++ b/pkg/serializer/defaulter.go @@ -16,7 +16,7 @@ type defaulter struct { LockedScheme } -func (d *defaulter) SchemeLock() LockedScheme { +func (d *defaulter) GetLockedScheme() LockedScheme { return d.LockedScheme } diff --git a/pkg/serializer/encode.go b/pkg/serializer/encode.go index ace63061..19f6b1ca 100644 --- a/pkg/serializer/encode.go +++ b/pkg/serializer/encode.go @@ -25,7 +25,7 @@ type encoder struct { opts EncodeOptions } -func (e *encoder) SchemeLock() LockedScheme { +func (e *encoder) GetLockedScheme() LockedScheme { return e.LockedScheme } diff --git a/pkg/serializer/patch.go b/pkg/serializer/patch.go index fdf58154..d6987334 100644 --- a/pkg/serializer/patch.go +++ b/pkg/serializer/patch.go @@ -46,7 +46,7 @@ type Patcher interface { func NewPatcher(encoder Encoder, decoder Decoder) Patcher { // It shouldn't matter if we use the LockedScheme from the encoder or decoder // TODO: Does this work with pretty encoders? - return &patcher{encoder.SchemeLock(), encoder, decoder} + return &patcher{encoder.GetLockedScheme(), encoder, decoder} } type patcher struct { diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index edb6f38c..795590b6 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -63,7 +63,7 @@ type Serializer interface { // SchemeLock exposes the underlying LockedScheme. // A Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to // the "type universe" and advanced conversion/defaulting features. - SchemeLock() LockedScheme + GetLockedScheme() LockedScheme // CodecFactory provides access to the underlying CodecFactory, may be used if low-level access // is needed for encoding and decoding. 
@@ -85,7 +85,7 @@ type Encoder interface { EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv schema.GroupVersion) error // SchemeLock exposes the underlying LockedScheme - SchemeLock() LockedScheme + GetLockedScheme() LockedScheme // CodecFactory exposes the underlying CodecFactory CodecFactory() *k8sserializer.CodecFactory @@ -149,7 +149,7 @@ type Decoder interface { DecodeAll(fr FrameReader) ([]runtime.Object, error) // SchemeLock exposes the underlying LockedScheme - SchemeLock() LockedScheme + GetLockedScheme() LockedScheme } // Converter is an interface that allows access to object conversion capabilities @@ -171,7 +171,7 @@ type Converter interface { ConvertToHub(in runtime.Object) (runtime.Object, error) // SchemeLock exposes the underlying LockedScheme - SchemeLock() LockedScheme + GetLockedScheme() LockedScheme } // Defaulter is a high-level interface for accessing defaulting functions in a scheme @@ -189,7 +189,7 @@ type Defaulter interface { NewDefaultedObject(gvk schema.GroupVersionKind) (runtime.Object, error) // SchemeLock exposes the underlying LockedScheme - SchemeLock() LockedScheme + GetLockedScheme() LockedScheme } // NewSerializer constructs a new serializer based on a scheme, and optionally a codecfactory @@ -226,7 +226,7 @@ type serializer struct { patcher Patcher } -func (s *serializer) SchemeLock() LockedScheme { +func (s *serializer) GetLockedScheme() LockedScheme { return s.LockedScheme } diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 8bcaae7b..e9351642 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -117,7 +117,7 @@ func NewGeneric( } return &Generic{ // It shouldn't matter if we use the encoder's or decoder's SchemeLock - LockedScheme: encoder.SchemeLock(), + LockedScheme: encoder.GetLockedScheme(), encoder: encoder, decoder: decoder, diff --git a/pkg/storage/client/client.go b/pkg/storage/client/client.go index 7bb4e1dd..cd10200e 100644 --- 
a/pkg/storage/client/client.go +++ b/pkg/storage/client/client.go @@ -216,7 +216,7 @@ func (c *Generic) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAll // Scheme returns the scheme this client is using. func (c *Generic) Scheme() *kruntime.Scheme { - return c.Backend().Encoder().SchemeLock().Scheme() + return c.Backend().Encoder().GetLockedScheme().Scheme() } // RESTMapper returns the rest this client is using. For now, this returns nil, so don't use. From 33565c8243ac38d2fb26b925540213d9565039d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 01:24:51 +0200 Subject: [PATCH 114/149] Remove commit from versionref; can't really support commits now: it's very hard to index all unstructured data from read-only commits, and hence support it properly. --- pkg/storage/core/interfaces.go | 13 +++--------- pkg/storage/core/versionref.go | 39 +--------------------------------- 2 files changed, 4 insertions(+), 48 deletions(-) diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go index 8e93b87d..9b0ea4fb 100644 --- a/pkg/storage/core/interfaces.go +++ b/pkg/storage/core/interfaces.go @@ -41,18 +41,11 @@ type ObjectID interface { GroupVersionKind() GroupVersionKind } -// VersionRef is an interface that describes a reference to a specific version +// VersionRef is an interface that describes a reference to a specific version (for now; branch) // of Objects in a Storage or Client. type VersionRef interface { - // String returns the commit or branch name. - String() string - // IsWritable determines if the VersionRef points to such a state where it - // is possible to write on top of it, i.e. as in the case of a Git branch. - // - // A specific Git commit, however, isn't considered writable, as it points - // to a specific point in time that can't just be rewritten, (assuming this - // library only is additive, which it is). - IsWritable() bool + // Branch returns the branch name. 
+ Branch() string // IsZeroValue determines if this VersionRef is the "zero value", which means // that the caller should figure out how to handle that the user did not // give specific opinions of what version of the Object to get. diff --git a/pkg/storage/core/versionref.go b/pkg/storage/core/versionref.go index c9b3892b..9598064e 100644 --- a/pkg/storage/core/versionref.go +++ b/pkg/storage/core/versionref.go @@ -2,7 +2,6 @@ package core import ( "context" - "errors" ) var versionRefKey = versionRefKeyImpl{} @@ -29,52 +28,16 @@ func GetVersionRef(ctx context.Context) VersionRef { return r } -var ErrInvalidVersionRefType = errors.New("invalid version ref type") - // NewBranchRef creates a new VersionRef for a given branch. It is // valid for the branch to be ""; in this case it means the "zero // value", or unspecified branch to be more precise, where the caller // can choose how to handle. func NewBranchRef(branch string) VersionRef { return branchRef{branch} } -// NewCommitRef creates a new VersionRef for the given commit. The -// commit must uniquely define a certain revision precisely. It must -// not be an empty string. -func NewCommitRef(commit string) (VersionRef, error) { - if len(commit) == 0 { - return nil, errors.New("commit must not be an empty string") - } - return commitRef{commit}, nil -} - -// MustNewCommitRef runs NewCommitRef, but panics on errors -func MustNewCommitRef(commit string) VersionRef { - ref, err := NewCommitRef(commit) - if err != nil { - panic(err) - } - return ref -} - type branchRef struct{ branch string } -func (r branchRef) String() string { return r.branch } - -// A branch is considered writable, as commits can be added to it by libgitops -func (branchRef) IsWritable() bool { return true } +func (r branchRef) Branch() string { return r.branch } // A branch is considered the zero value if the branch is an empty string, // which it is e.g. when there was no VersionRef associated with a Context. 
func (r branchRef) IsZeroValue() bool { return r.branch == "" } - -type commitRef struct{ commit string } - -func (r commitRef) String() string { return r.commit } - -// A commit is not considered writable, as it is only a read snapshot of -// a specific point in time. -func (commitRef) IsWritable() bool { return false } - -// IsZeroValue should always return false for commits; as commit is mandatory -// to be a non-empty string. -func (r commitRef) IsZeroValue() bool { return r.commit == "" } From 9c503130dea7f467495751ca2ec1474797113f26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 01:27:31 +0200 Subject: [PATCH 115/149] Change to GetLockedScheme --- pkg/storage/filesystem/unstructured/recognizer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/storage/filesystem/unstructured/recognizer.go b/pkg/storage/filesystem/unstructured/recognizer.go index b2893507..415fc4e0 100644 --- a/pkg/storage/filesystem/unstructured/recognizer.go +++ b/pkg/storage/filesystem/unstructured/recognizer.go @@ -60,7 +60,7 @@ func (r KubeObjectRecognizer) RecognizeObjectIDs(_ string, fr serializer.FrameRe if metaObj.Name == "" { return nil, fmt.Errorf(".metadata.name field must not be empty") } - if !r.AllowUnrecognized && !r.Decoder.SchemeLock().Scheme().Recognizes(gvk) { + if !r.AllowUnrecognized && !r.Decoder.GetLockedScheme().Scheme().Recognizes(gvk) { return nil, fmt.Errorf("GroupVersionKind %v not recognized by the scheme", gvk) } From c5fb9616ae485e6fc6f4696e082929ffa7048f0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 01:49:43 +0200 Subject: [PATCH 116/149] Adapt to VersionRef changes --- pkg/storage/client/transactional/client.go | 39 +++---------------- .../transactional/distributed/client.go | 11 +----- 2 files changed, 8 insertions(+), 42 deletions(-) diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index 
caff0c96..a0d3658b 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -78,15 +78,8 @@ func (c *Generic) List(ctx context.Context, list client.ObjectList, opts ...clie } func (c *Generic) lockForReading(ctx context.Context, operation func() error) error { - ref := core.GetVersionRef(ctx) - if !ref.IsWritable() { - // Never block reads for read-only VersionRefs. We know nobody can change - // them during the read operation, so they should be race condition-free. - return operation() - } - // If the VersionRef is writable; treat it as a branch and lock it to avoid - // race conditions. - return c.lockAndReadBranch(ref.String(), operation) + // Get the branch from the context, and lock it + return c.lockAndReadBranch(core.GetVersionRef(ctx).Branch(), operation) } func (c *Generic) lockAndReadBranch(branch string, callback func() error) error { @@ -237,27 +230,12 @@ func (c *Generic) BranchTransaction(ctx context.Context, headBranch string, opts return tx } -func (c *Generic) validateCtx(ctx context.Context) (core.VersionRef, error) { - // Check so versionref is writable - ref := core.GetVersionRef(ctx) - if !ref.IsWritable() { - return nil, fmt.Errorf("must not give a writable VersionRef to (Branch)Transaction()") - } - // Just return its - return ref, nil -} - func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) { - // Validate the versionref from the context - ref, err := c.validateCtx(ctx) - if err != nil { - return nil, err - } // Parse options o := defaultTxOptions().ApplyOptions(opts) - branch := ref.String() + branch := core.GetVersionRef(ctx).Branch() info := TxInfo{ Base: branch, Head: branch, @@ -267,7 +245,7 @@ func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) ctxWithDeadline, cleanupFunc := c.initTx(ctx, info) // Run pre-tx checks - err = c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info) + err := 
c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info) return &txImpl{ &txCommon{ @@ -282,12 +260,7 @@ func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) } func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts ...TxOption) (BranchTx, error) { - // Validate the versionref from the context - ref, err := c.validateCtx(ctx) - if err != nil { - return nil, err - } - baseBranch := ref.String() + baseBranch := core.GetVersionRef(ctx).Branch() // Append random bytes to the end of the head branch if it ends with a dash if strings.HasSuffix(headBranch, "-") { @@ -320,7 +293,7 @@ func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts ctxWithDeadline, cleanupFunc := c.initTx(ctxWithHeadBranch, info) // Run pre-tx checks and create the new branch - err = utilerrs.NewAggregate([]error{ + err := utilerrs.NewAggregate([]error{ c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info), c.manager.CreateBranch(ctxWithDeadline, headBranch), }) diff --git a/pkg/storage/client/transactional/distributed/client.go b/pkg/storage/client/transactional/distributed/client.go index d0b0893a..5a79b274 100644 --- a/pkg/storage/client/transactional/distributed/client.go +++ b/pkg/storage/client/transactional/distributed/client.go @@ -71,12 +71,7 @@ func (c *Generic) List(ctx context.Context, list client.ObjectList, opts ...clie } func (c *Generic) readWhenPossible(ctx context.Context, operation func() error) error { - ref := core.GetVersionRef(ctx) - // If the ref is not writable, we don't have to worry about race conditions - if !ref.IsWritable() { - return operation() - } - branch := ref.String() + branch := c.branchFromCtx(ctx) // Check if we need to do a pull before if c.needsResync(branch, c.opts.CacheValidDuration) { @@ -238,10 +233,8 @@ func (c *Generic) Remote() Remote { return c.remote } -// note: this must ONLY be called from such functions where it is guaranteed that the -// ctx 
contains a branch versionref. func (c *Generic) branchFromCtx(ctx context.Context) string { - return core.GetVersionRef(ctx).String() + return core.GetVersionRef(ctx).Branch() } func (c *Generic) returnErr(err error) error { From d08bb28b766ed287433a279a156315f497574d2a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 01:54:33 +0200 Subject: [PATCH 117/149] Remove the ObjectID set as it's redundant; focus more on the unversioned part instead. --- pkg/storage/core/interfaces.go | 3 + pkg/storage/core/objectid.go | 1 + pkg/storage/core/set_objectid.go | 82 ----------- pkg/storage/core/set_unversioned_objectid.go | 139 ++++++++++++++++--- 4 files changed, 122 insertions(+), 103 deletions(-) delete mode 100644 pkg/storage/core/set_objectid.go diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go index 9b0ea4fb..75b5b6a5 100644 --- a/pkg/storage/core/interfaces.go +++ b/pkg/storage/core/interfaces.go @@ -38,6 +38,9 @@ type UnversionedObjectID interface { type ObjectID interface { UnversionedObjectID + // WithoutVersion unwraps the underlying UnversionedObjectID; so it can + // be used for e.g. equality (e.g. as a map key) operations. 
+ WithoutVersion() UnversionedObjectID GroupVersionKind() GroupVersionKind } diff --git a/pkg/storage/core/objectid.go b/pkg/storage/core/objectid.go index f23c4db6..134b9127 100644 --- a/pkg/storage/core/objectid.go +++ b/pkg/storage/core/objectid.go @@ -36,6 +36,7 @@ type objectID struct { version string } +func (o objectID) WithoutVersion() UnversionedObjectID { return o.unversionedObjectID } func (o objectID) GroupVersionKind() schema.GroupVersionKind { return o.gk.WithVersion(o.version) } func (o objectID) String() string { if o.key.Namespace == "" { diff --git a/pkg/storage/core/set_objectid.go b/pkg/storage/core/set_objectid.go deleted file mode 100644 index c777b1d7..00000000 --- a/pkg/storage/core/set_objectid.go +++ /dev/null @@ -1,82 +0,0 @@ -package core - -import ( - "k8s.io/apimachinery/pkg/util/sets" -) - -// This is a copy of set_unversioned_objectid.go; needed as Go doesn't have generics. - -// ObjectIDSet is a set of ObjectIDs -type ObjectIDSet interface { - // Has returns true if the object ID is in the set - Has(id ObjectID) bool - // HasAny returns true if any of the object IDs are in the set - HasAny(ids ...ObjectID) bool - // InsertUnique returns false if any of the object IDs are in the set already, - // or true if none of the given object IDs exist in the set yet. If the return value - // is true, the IDs have been added to the set. - InsertUnique(ids ...ObjectID) bool - // Insert inserts the given object IDs into the set - Insert(ids ...ObjectID) ObjectIDSet - // Delete deletes the given object IDs from the set - Delete(ids ...ObjectID) ObjectIDSet - // List lists the given object IDs of the set - List() []ObjectID - // Len returns the length of the set - Len() int -} - -// NewObjectIDSet creates a new ObjectIDSet -func NewObjectIDSet(ids ...ObjectID) ObjectIDSet { - return (make(objectIDSet, len(ids))).Insert(ids...) 
-} - -type objectIDSet map[ObjectID]sets.Empty - -func (s objectIDSet) Has(id ObjectID) bool { - _, found := s[id] - return found -} - -func (s objectIDSet) HasAny(ids ...ObjectID) bool { - for _, id := range ids { - if s.Has(id) { - return true - } - } - return false -} - -func (s objectIDSet) InsertUnique(ids ...ObjectID) bool { - if s.HasAny(ids...) { - return false - } - s.Insert(ids...) - return true -} - -func (s objectIDSet) Insert(ids ...ObjectID) ObjectIDSet { - for _, id := range ids { - s[id] = sets.Empty{} - } - return s -} - -func (s objectIDSet) Delete(ids ...ObjectID) ObjectIDSet { - for _, id := range ids { - delete(s, id) - } - return s -} - -func (s objectIDSet) List() []ObjectID { - list := make([]ObjectID, 0, len(s)) - for id := range s { - list = append(list, id) - } - return list -} - -func (s objectIDSet) Len() int { - return len(s) -} diff --git a/pkg/storage/core/set_unversioned_objectid.go b/pkg/storage/core/set_unversioned_objectid.go index 99a26418..18c553c8 100644 --- a/pkg/storage/core/set_unversioned_objectid.go +++ b/pkg/storage/core/set_unversioned_objectid.go @@ -1,34 +1,83 @@ package core import ( + "fmt" + "k8s.io/apimachinery/pkg/util/sets" ) -// This is a copy of set_unversioned_objectid.go; needed as Go doesn't have generics. - -// UnversionedObjectIDSet is a set of UnversionedObjectIDs +// UnversionedObjectIDSet is a set of UnversionedObjectIDs. +// The underlying data storage is a map[UnversionedObjectID]struct{}. +// +// This interface should be as similar as possible to +// k8s.io/apimachinery/pkg/util/sets. type UnversionedObjectIDSet interface { - // Has returns true if the object ID is in the set + // Has returns true if the object ID is in the set. Has(id UnversionedObjectID) bool - // HasAny returns true if any of the object IDs are in the set + // HasAny returns true if any of the object IDs are in the set. 
HasAny(ids ...UnversionedObjectID) bool - // InsertUnique returns false if any of the object IDs are in the set already, - // or true if none of the given object IDs exist in the set yet. If the return value - // is true, the IDs have been added to the set. - InsertUnique(ids ...UnversionedObjectID) bool - // Insert inserts the given object IDs into the set + + // Insert inserts the given object IDs into the set. Returns itself. + // WARNING: This mutates the receiver. Issue a Copy() before if not desired. Insert(ids ...UnversionedObjectID) UnversionedObjectIDSet - // Delete deletes the given object IDs from the set + // InsertSet inserts the contents of s2 into itself, and returns itself. + // WARNING: This mutates the receiver. Issue a Copy() before if not desired. + InsertSet(s2 UnversionedObjectIDSet) UnversionedObjectIDSet + + // Delete deletes the given object IDs from the set. Returns itself. + // WARNING: This mutates the receiver. Issue a Copy() before if not desired. Delete(ids ...UnversionedObjectID) UnversionedObjectIDSet - // List lists the given object IDs of the set + // DeleteSet deletes the contents of s2 from itself, and returns itself. + // WARNING: This mutates the receiver. Issue a Copy() before if not desired. + DeleteSet(s2 UnversionedObjectIDSet) UnversionedObjectIDSet + + // List lists the given object IDs of the set, in no particular order. + // List requires O(n) extra memory, when n == Len(). Use ForEach for no copying. List() []UnversionedObjectID + // ForEach runs fn for each item in the set. Does not copy the whole list. + // Uses a for-range underneath, so it is even safe to delete items underneath, ref: + // https://stackoverflow.com/questions/23229975/is-it-safe-to-remove-selected-keys-from-map-within-a-range-loop + // If an error occurs, the rest of the IDs are not traversed. Iteration order is random. 
+ ForEach(fn func(id UnversionedObjectID) error) error + // Len returns the length of the set Len() int + // Copy does a shallow copy of set element; but performs a deep copy of the + // underlying map itself; so mutating operations don't propagate unwantedly. + Copy() UnversionedObjectIDSet + + // Difference returns a set of objects that are not in s2 + // For example: + // s1 = {a1, a2, a3} + // s2 = {a1, a2, a4, a5} + // s1.Difference(s2) = {a3} + // s2.Difference(s1) = {a4, a5} + Difference(s2 UnversionedObjectIDSet) UnversionedObjectIDSet + + // String returns a human-friendly representation + String() string } // NewUnversionedObjectIDSet creates a new UnversionedObjectIDSet func NewUnversionedObjectIDSet(ids ...UnversionedObjectID) UnversionedObjectIDSet { - return (make(unversionedObjectIDSet, len(ids))).Insert(ids...) + return NewUnversionedObjectIDSetSized(len(ids), ids...) +} + +// NewUnversionedObjectIDSet creates a new UnversionedObjectIDSet for a given map length. +func NewUnversionedObjectIDSetSized(len int, ids ...UnversionedObjectID) UnversionedObjectIDSet { + return (make(unversionedObjectIDSet, len)).Insert(ids...) +} + +// UnversionedObjectIDSetFromVersionedSlice transforms a slice of ObjectIDs to +// an unversioned set. +func UnversionedObjectIDSetFromVersionedSlice(versioned []ObjectID) UnversionedObjectIDSet { + result := NewUnversionedObjectIDSetSized(len(versioned)) + for _, id := range versioned { + // Important: We should "unwrap" to a plain UnversionedObjectID here, so + // equality works properly in e.g. map keys. + result.Insert(id.WithoutVersion()) + } + return result } type unversionedObjectIDSet map[UnversionedObjectID]sets.Empty @@ -47,14 +96,6 @@ func (s unversionedObjectIDSet) HasAny(ids ...UnversionedObjectID) bool { return false } -func (s unversionedObjectIDSet) InsertUnique(ids ...UnversionedObjectID) bool { - if s.HasAny(ids...) { - return false - } - s.Insert(ids...) 
- return true -} - func (s unversionedObjectIDSet) Insert(ids ...UnversionedObjectID) UnversionedObjectIDSet { for _, id := range ids { s[id] = sets.Empty{} @@ -62,6 +103,15 @@ func (s unversionedObjectIDSet) Insert(ids ...UnversionedObjectID) UnversionedOb return s } +// InsertSet inserts the contents of s2 into itself, and returns itself. +func (s unversionedObjectIDSet) InsertSet(s2 UnversionedObjectIDSet) UnversionedObjectIDSet { + _ = s2.ForEach(func(id UnversionedObjectID) error { + s[id] = sets.Empty{} + return nil + }) + return s +} + func (s unversionedObjectIDSet) Delete(ids ...UnversionedObjectID) UnversionedObjectIDSet { for _, id := range ids { delete(s, id) @@ -69,6 +119,15 @@ func (s unversionedObjectIDSet) Delete(ids ...UnversionedObjectID) UnversionedOb return s } +// DeleteSet deletes the contents of s2 from itself, and returns itself. +func (s unversionedObjectIDSet) DeleteSet(s2 UnversionedObjectIDSet) UnversionedObjectIDSet { + _ = s2.ForEach(func(id UnversionedObjectID) error { + delete(s, id) + return nil + }) + return s +} + func (s unversionedObjectIDSet) List() []UnversionedObjectID { list := make([]UnversionedObjectID, 0, len(s)) for id := range s { @@ -77,6 +136,44 @@ func (s unversionedObjectIDSet) List() []UnversionedObjectID { return list } +// ForEach runs fn for each item in the set. Does not copy the whole list. 
+func (s unversionedObjectIDSet) ForEach(fn func(id UnversionedObjectID) error) (err error) { + for key := range s { + if err = fn(key); err != nil { + return + } + } + return +} + func (s unversionedObjectIDSet) Len() int { return len(s) } + +func (s unversionedObjectIDSet) Copy() UnversionedObjectIDSet { + result := make(unversionedObjectIDSet, s.Len()) + for id := range s { + result.Insert(id) + } + return result +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s unversionedObjectIDSet) Difference(s2 UnversionedObjectIDSet) UnversionedObjectIDSet { + result := NewUnversionedObjectIDSet() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +func (s unversionedObjectIDSet) String() string { + return fmt.Sprintf("UnversionedObjectIDSet (len=%d): %v", s.Len(), s.List()) +} From 1dd97f5b7f1a28ab92034c8eaf436f4943ae58b6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 01:55:36 +0200 Subject: [PATCH 118/149] Adapt to set changes. --- pkg/storage/client/client.go | 7 ++++--- pkg/storage/filesystem/filefinder_simple.go | 8 ++++---- .../filesystem/unstructured/recognizer.go | 16 +++++++++++----- 3 files changed, 19 insertions(+), 12 deletions(-) diff --git a/pkg/storage/client/client.go b/pkg/storage/client/client.go index cd10200e..e2488e2e 100644 --- a/pkg/storage/client/client.go +++ b/pkg/storage/client/client.go @@ -96,7 +96,7 @@ func (c *Generic) List(ctx context.Context, list ObjectList, opts ...ListOption) if err != nil { return err } - allIDs.Insert(ids.List()...) 
+ allIDs.InsertSet(ids) } // Populate objs through the given (non-buffered) channel @@ -250,9 +250,10 @@ func createUnstructuredObject(gvk core.GroupVersionKind) newObjectFunc { func (c *Generic) processKeys(ctx context.Context, ids core.UnversionedObjectIDSet, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan Object) error { goroutines := []func() error{} - for _, id := range ids.List() { + _ = ids.ForEach(func(id core.UnversionedObjectID) error { goroutines = append(goroutines, c.processKey(ctx, id, filterOpts, fn, output)) - } + return nil + }) defer close(output) diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go index ae17243b..36ce3f10 100644 --- a/pkg/storage/filesystem/filefinder_simple.go +++ b/pkg/storage/filesystem/filefinder_simple.go @@ -190,8 +190,8 @@ func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, if err != nil { return nil, err } - // Map the names to UnversionedObjectIDs - ids := make([]core.UnversionedObjectID, 0, len(entries)) + // Map the names to UnversionedObjectIDs. We already know how many entries. 
+ ids := core.NewUnversionedObjectIDSetSized(len(entries)) for _, entry := range entries { // Loop through all entries, and make sure they are sanitized .metadata.name's if f.opts.SubDirectoryFileName != "" { @@ -211,9 +211,9 @@ func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, entry = strings.TrimSuffix(entry, ext) } // If we got this far, add the key to the list - ids = append(ids, core.NewUnversionedObjectID(gk, core.ObjectKey{Name: entry, Namespace: namespace})) + ids.Insert(core.NewUnversionedObjectID(gk, core.ObjectKey{Name: entry, Namespace: namespace})) } - return core.NewUnversionedObjectIDSet(ids...), nil + return ids, nil } func readDir(ctx context.Context, fs Filesystem, dir string) ([]string, error) { diff --git a/pkg/storage/filesystem/unstructured/recognizer.go b/pkg/storage/filesystem/unstructured/recognizer.go index 415fc4e0..931abac9 100644 --- a/pkg/storage/filesystem/unstructured/recognizer.go +++ b/pkg/storage/filesystem/unstructured/recognizer.go @@ -33,12 +33,13 @@ type KubeObjectRecognizer struct { AllowDuplicates bool } -func (r KubeObjectRecognizer) RecognizeObjectIDs(_ string, fr serializer.FrameReader) (core.ObjectIDSet, error) { +func (r KubeObjectRecognizer) RecognizeObjectIDs(_ string, fr serializer.FrameReader) ([]core.ObjectID, error) { if r.Decoder == nil { return nil, errors.New("programmer error: KubeObjectRecognizer.Decoder is nil") } - ids := core.NewObjectIDSet() + ids := []core.ObjectID{} + seen := map[core.ObjectID]struct{}{} for { metaObj := &metav1.PartialObjectMetadata{} err := r.Decoder.DecodeInto(fr, metaObj) @@ -66,11 +67,16 @@ func (r KubeObjectRecognizer) RecognizeObjectIDs(_ string, fr serializer.FrameRe // Create the ObjectID id := core.NewObjectID(metaObj.GroupVersionKind(), core.ObjectKeyFromMetav1Object(metaObj)) - // Insert it into the set; but error if AllowDuplicates==false and it already existed. 
- // Important: As InsertUnique mutates ids, it must be the first if case - if !ids.InsertUnique(id) && !r.AllowDuplicates { + // Check if this has been seen before + _, idSeen := seen[id] + // If this ID has been seen before, but duplicates are disallowed, error + if idSeen && !r.AllowDuplicates { return nil, fmt.Errorf("invalid file: two Objects with the same ID: %s", id) } + // Add the ID to the list + ids = append(ids, id) + // Now this ID has been seen + seen[id] = struct{}{} } return ids, nil From 25053f370d4bde4d80dc36f8e7ef388eda66ccc7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 02:41:07 +0200 Subject: [PATCH 119/149] Support multi-frame files, duplicate IDs across files, and various VersionRefs in unstructured(event).Storage. --- pkg/storage/event/event.go | 6 + .../filesystem/unstructured/event/storage.go | 170 ++++++----- .../unstructured/filefinder_mapped.go | 277 ++++++++++++++---- .../filesystem/unstructured/interfaces.go | 64 ++-- .../filesystem/unstructured/mapped_cache.go | 219 +++++++++++--- .../filesystem/unstructured/storage.go | 151 +++++----- 6 files changed, 589 insertions(+), 298 deletions(-) diff --git a/pkg/storage/event/event.go b/pkg/storage/event/event.go index 3f57fdb2..3967b2dc 100644 --- a/pkg/storage/event/event.go +++ b/pkg/storage/event/event.go @@ -17,6 +17,7 @@ const ( ObjectEventUpdate // 2 ObjectEventDelete // 3 ObjectEventSync // 4 + ObjectEventError // 5 ) func (o ObjectEventType) String() string { @@ -31,6 +32,8 @@ func (o ObjectEventType) String() string { return "DELETE" case 4: return "SYNC" + case 5: + return "ERROR" } // Should never happen @@ -42,6 +45,9 @@ func (o ObjectEventType) String() string { type ObjectEvent struct { ID core.UnversionedObjectID Type ObjectEventType + // Error is only non-nil if Type == ObjectEventError. The receiver + // must check/respect the error if set. 
+ Error error } // ObjectEventStream is a channel of ObjectEvents diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index 0289e2dd..fa8a9c06 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -62,11 +62,7 @@ func NewManifest( // NewGeneric is an extended Storage implementation, which // together with the provided ObjectRecognizer and FileEventsEmitter listens for // file events, keeps the mappings of the unstructured.Storage's unstructured.FileFinder -// in sync (s must use the mapped variant), and sends high-level ObjectEvents -// upstream. -// -// Note: This WatchStorage only works for one-frame files (i.e. only one YAML document -// per file is supported). +// in sync, and sends high-level ObjectEvents upstream. func NewGeneric( s unstructured.Storage, emitter fileevents.Emitter, @@ -98,13 +94,13 @@ type GenericStorageOptions struct { // Generic implements unstructuredevent.Storage. var _ Storage = &Generic{} -// Generic is an extended raw.Storage implementation, which provides a watcher -// for watching changes in the directory managed by the embedded Storage's RawStorage. -// If the RawStorage is a MappedRawStorage instance, it's mappings will automatically -// be updated by the WatchStorage. Update events are sent to the given event stream. -// Note: This WatchStorage only works for one-frame files (i.e. only one YAML document -// per file is supported). -// TODO: Update description +// Generic is an extended Storage implementation, which +// together with the provided ObjectRecognizer and FileEventsEmitter listens for +// file events, keeps the mappings of the unstructured.Storage's unstructured.FileFinder +// in sync, and sends high-level ObjectEvents upstream. +// +// This implementation does not support different VersionRefs, but always stays on +// the "zero value" "" branch. 
type Generic struct { unstructured.Storage // the filesystem events emitter @@ -147,29 +143,38 @@ func (s *Generic) WatchForObjectEvents(ctx context.Context, into event.ObjectEve // at all before events start happening, the reporting might not work as it should if s.opts.SyncAtStart { // Disregard the changed files at Sync. - if _, err := s.Sync(ctx); err != nil { + if _, _, err := s.Sync(ctx); err != nil { return err } } return nil // all ok } -func (s *Generic) Sync(ctx context.Context) ([]unstructured.ChecksumPathID, error) { +// Sync extends the underlying unstructured.Storage.Sync(), but optionally also +// sends special "SYNC" and "ERROR" events to the returned "successful" and "duplicates" +// sets, respectively. +func (s *Generic) Sync(ctx context.Context) (successful, duplicates core.UnversionedObjectIDSet, err error) { // Sync the underlying UnstructuredStorage, and see what files had changed since last sync - changedObjects, err := s.Storage.Sync(ctx) + successful, duplicates, err = s.Storage.Sync(ctx) if err != nil { - return nil, err + return nil, nil, err } - // Send special "sync" events for each of the changed objects, if configured + // Send special "sync" or "error" events for each of the changed objects, if configured if s.opts.EmitSyncEvent { - for _, changedObject := range changedObjects { + _ = successful.ForEach(func(id core.UnversionedObjectID) error { // Send a special "sync" event for this ObjectID to the events channel - s.sendEvent(event.ObjectEventSync, changedObject.ID) - } + s.sendEvent(event.ObjectEventSync, id) + return nil + }) + _ = duplicates.ForEach(func(id core.UnversionedObjectID) error { + // Send an error upstream for the duplicate + s.sendError(id, fmt.Errorf("%w: %s", unstructured.ErrTrackingDuplicate, id)) + return nil + }) } - return changedObjects, nil + return } // Write writes the given content to the resource indicated by the ID. 
@@ -260,63 +265,67 @@ func (s *Generic) monitorFunc() error { } func (s *Generic) handleDelete(ctx context.Context, ev *fileevents.FileEvent) error { - // The object is deleted, so we need to do a reverse-lookup of what kind of object - // was there earlier, based on the path. This assumes that the filefinder organizes - // the known objects in such a way that it is able to do the reverse-lookup. For - // mapped FileFinders, by this point the path should still be in the local cache, - // which should make us able to get the ID before deleted from the cache. - objectID, err := unstructured.SingleObjectAt(ctx, s.UnstructuredFileFinder(), ev.Path) - if err != nil { - return fmt.Errorf("failed to reverse lookup ID for deleted file %q: %w", ev.Path, err) - } - - // Remove the mapping from the FileFinder cache for this ID as it's now deleted - s.deleteMapping(ctx, objectID) - // Send the delete event to the channel - s.sendEvent(event.ObjectEventDelete, objectID) - return nil + // Delete the given path from the FileFinder; loop through the deleted objects + return s.UnstructuredFileFinder().DeleteMapping(ctx, ev.Path).ForEach(func(id core.UnversionedObjectID) error { + // Send the delete event to the channel + s.sendEvent(event.ObjectEventDelete, id) + return nil + }) } func (s *Generic) handleModifyMove(ctx context.Context, ev *fileevents.FileEvent) error { - // Read and recognize the file - versionedID, err := unstructured.ReadAndRecognizeFile( - ctx, - s.UnstructuredFileFinder().Filesystem(), - s.UnstructuredFileFinder().ContentTyper(), - s.ObjectRecognizer(), - ev.Path, - ) + fileFinder := s.UnstructuredFileFinder() + + // If the file was moved, move the cached mapping(s) too + if ev.Type == fileevents.FileEventMove { + // There's no need to check if this move actually was performed; as + // if OldPath did not exist previously, the code below will just treat + // it as a Create. 
+ _ = fileFinder.MoveFile(ctx, ev.OldPath, ev.Path) + } + + // Recognize the contents of the file + idSet, cp, alreadyCached, err := unstructured.RecognizeIDsInFile(ctx, fileFinder, s.ObjectRecognizer(), ev.Path) if err != nil { return err } + // If the file is already up-to-date as per the checksum, we're all fine + if alreadyCached { + return nil + } - // If the file was just moved around, just overwrite the earlier mapping - if ev.Type == fileevents.FileEventMove { - // This assumes that the file content does not change in the move - // operation. TODO: document this as a requirement for the Emitter. - s.setMapping(ctx, versionedID, ev.Path) + // Store this new mapping in the cache + added, duplicates, removed := fileFinder.SetMapping(ctx, *cp, idSet) - // Internal move events are a no-op + // Send added events + _ = added.ForEach(func(id core.UnversionedObjectID) error { + // Send a create event to the channel + s.sendEvent(event.ObjectEventCreate, id) return nil - } + }) + // Send modify events. Do not mutate idSet unnecessarily. + _ = idSet.Copy(). + DeleteSet(added). + DeleteSet(removed). + DeleteSet(duplicates). + ForEach(func(id core.UnversionedObjectID) error { + // Send a update event to the channel + s.sendEvent(event.ObjectEventUpdate, id) + return nil + }) + // Send removed events + _ = removed.ForEach(func(id core.UnversionedObjectID) error { + // Send a delete event to the channel + s.sendEvent(event.ObjectEventDelete, id) + return nil + }) + // Send duplicate error events + _ = duplicates.ForEach(func(id core.UnversionedObjectID) error { + // Send an error event to the channel + s.sendError(id, fmt.Errorf("%w: %q, %s", unstructured.ErrTrackingDuplicate, ev.Path, id)) + return nil + }) - // Determine if this object already existed in the fileFinder's cache, - // in order to find out if the object was created or modified (default). - // TODO: In the future, maybe support multiple files pointing to the same - // ObjectID? Case in point here is e.g. 
a Modify event for a known path that - // changes the underlying ObjectID. - objectEvent := event.ObjectEventUpdate - // Set the mapping if it didn't exist before; assume this is a Create event - if _, ok := s.UnstructuredFileFinder().GetMapping(ctx, versionedID); !ok { - // This is what actually determines if an Object is created, - // so update the event to update.ObjectEventCreate here - objectEvent = event.ObjectEventCreate - } - // Update the mapping between this object and path (this updates - // the checksum underneath too). - s.setMapping(ctx, versionedID, ev.Path) - // Send the event to the channel - s.sendEvent(objectEvent, versionedID) return nil } @@ -328,24 +337,11 @@ func (s *Generic) sendEvent(eventType event.ObjectEventType, id core.Unversioned } } -// setMapping registers a mapping between the given object and the specified path, if raw is a -// MappedRawStorage. If a given mapping already exists between this object and some path, it -// will be overridden with the specified new path -func (s *Generic) setMapping(ctx context.Context, id core.UnversionedObjectID, path string) { - // Get the current checksum of the new file - checksum, err := s.UnstructuredFileFinder().Filesystem().Checksum(ctx, path) - if err != nil { - logrus.Errorf("Unexpected error when getting checksum of file %q: %v", path, err) - return +func (s *Generic) sendError(id core.UnversionedObjectID, err error) { + logrus.Tracef("Generic: Sending error event for %s: %v", id, err) + s.outbound <- &event.ObjectEvent{ + ID: id, + Type: event.ObjectEventError, + Error: err, } - // Register the current state in the cache - s.UnstructuredFileFinder().SetMapping(ctx, id, unstructured.ChecksumPath{ - Path: path, - Checksum: checksum, - }) -} - -// deleteMapping removes a mapping a file that doesn't exist -func (s *Generic) deleteMapping(ctx context.Context, id core.UnversionedObjectID) { - s.UnstructuredFileFinder().DeleteMapping(ctx, id) } diff --git 
a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go index 6075aaa2..a8d45ff1 100644 --- a/pkg/storage/filesystem/unstructured/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -3,8 +3,10 @@ package unstructured import ( "context" "errors" + "fmt" "sync" + "github.com/sirupsen/logrus" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" utilerrs "k8s.io/apimachinery/pkg/util/errors" @@ -14,6 +16,8 @@ import ( var ( // ErrNotTracked is returned when the requested resource wasn't found. ErrNotTracked = errors.New("untracked object") + // ErrTrackingDuplicate is returned when a duplicate of two object IDs in the cache have occurred + ErrTrackingDuplicate = errors.New("duplicate object ID; already exists in an other file") ) // GenericFileFinder implements FileFinder. @@ -32,10 +36,8 @@ func NewGenericFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.Fi return &GenericFileFinder{ contentTyper: contentTyper, fs: fs, - // TODO: Support multiple branches - branch: &branchImpl{}, - pathToIDs: make(map[string]core.UnversionedObjectIDSet), - mu: &sync.RWMutex{}, + cache: &objectIDCacheImpl{}, + mu: &sync.RWMutex{}, } } @@ -43,7 +45,7 @@ func NewGenericFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.Fi // It uses a ContentTyper to identify what content type a file uses. // // This implementation relies on that all information about what files exist -// is fed through SetMapping(s). If a file or ID is requested that doesn't +// is fed through {Set,Reset}Mapping. If a file or ID is requested that doesn't // exist in the internal cache, ErrNotTracked will be returned. 
// // Hence, this implementation does not at the moment support creating net-new @@ -53,9 +55,8 @@ type GenericFileFinder struct { contentTyper filesystem.ContentTyper fs filesystem.Filesystem - branch branch - pathToIDs map[string]core.UnversionedObjectIDSet - // mu guards branch and pathToIDs + cache objectIDCache + // mu guards cache mu *sync.RWMutex } @@ -67,28 +68,39 @@ func (f *GenericFileFinder) ContentTyper() filesystem.ContentTyper { return f.contentTyper } +func (f *GenericFileFinder) versionedCache(ctx context.Context) versionRef { + return f.cache.versionRef(core.GetVersionRef(ctx).Branch()) +} + // ObjectPath gets the file path relative to the root directory func (f *GenericFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { - cp, ok := f.GetMapping(ctx, id) + // Lock for reading + f.mu.RLock() + defer f.mu.RUnlock() + + // Get the path for the given version and ID + p, ok := f.versionedCache(ctx).getID(id).get() if !ok { - // TODO: separate interface for "new creates"? return "", utilerrs.NewAggregate([]error{ErrNotTracked, core.NewErrNotFound(id)}) } - return cp.Path, nil + return p, nil } // ObjectsAt retrieves the ObjectIDs in the file with the given relative file path. func (f *GenericFileFinder) ObjectsAt(ctx context.Context, path string) (core.UnversionedObjectIDSet, error) { + // Lock for reading f.mu.RLock() defer f.mu.RUnlock() - // TODO: This needs to be per-branch too - ids, ok := f.pathToIDs[path] + + // Get the all the IDs for the given path + ids, ok := f.versionedCache(ctx).getIDs(path) if !ok { // TODO: Support "creation" of Objects easier, in a generic way through an interface, e.g. // NewObjectPlacer? - return nil, ErrNotTracked + return nil, fmt.Errorf("%q: %w", path, ErrNotTracked) } - return ids, nil + // Return a deep copy of the set; don't let the caller mess with our internal state + return ids.Copy(), nil } // ListNamespaces lists the available namespaces for the given GroupKind. 
@@ -102,10 +114,13 @@ func (f *GenericFileFinder) ObjectsAt(ctx context.Context, path string) (core.Un // different namespaces that have been set on any object belonging to // the given GroupKind. func (f *GenericFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { + // Lock for reading f.mu.RLock() defer f.mu.RUnlock() - m := f.branch.groupKind(gk).raw() + // Get the versioned mapping between the groupkind and its namespaces + m := f.versionedCache(ctx).groupKind(gk).raw() + // Add all the namespaces to a stringset and return nsSet := sets.NewString() for ns := range m { nsSet.Insert(ns) @@ -119,80 +134,212 @@ func (f *GenericFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKin // must only return object IDs for that given namespace. If any of the given // rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. func (f *GenericFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) { + // Lock for reading f.mu.RLock() defer f.mu.RUnlock() - m := f.branch.groupKind(gk).namespace(namespace).raw() - ids := make([]core.UnversionedObjectID, 0, len(m)) + // Get the versioned mapping between the groupkind & ns, and its registered names + m := f.versionedCache(ctx).groupKind(gk).namespace(namespace).raw() + // Create a sized ID set; and insert the IDs one-by-one + ids := core.NewUnversionedObjectIDSetSized(len(m)) for name := range m { - ids = append(ids, core.NewUnversionedObjectID(gk, core.ObjectKey{Name: name, Namespace: namespace})) + ids.Insert(core.NewUnversionedObjectID(gk, core.ObjectKey{Name: name, Namespace: namespace})) } - return core.NewUnversionedObjectIDSet(ids...), nil + return ids, nil } -// GetMapping retrieves a mapping in the system -func (f *GenericFileFinder) GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) { +// ChecksumForPath retrieves the latest known checksum for the given 
path. +func (f *GenericFileFinder) ChecksumForPath(ctx context.Context, path string) (string, bool) { + // Lock for reading f.mu.RLock() defer f.mu.RUnlock() - return f.getMapping(ctx, id) -} -// getMapping is like GetMapping; but without a read lock; for internal operations -func (f *GenericFileFinder) getMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) { - cp, ok := f.branch. - groupKind(id.GroupKind()). - namespace(id.ObjectKey().Namespace). - name(id.ObjectKey().Name) - return cp, ok + // Get the checksum for the given path at the given version + return f.versionedCache(ctx).getChecksum(path) } -// SetMapping binds an ID's virtual path to a physical file path -func (f *GenericFileFinder) SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) { +// MoveFile moves an internal mapping from oldPath to newPath. moved == true if the oldPath +// existed and hence the move was performed. +func (f *GenericFileFinder) MoveFile(ctx context.Context, oldPath, newPath string) bool { + // Lock for writing f.mu.Lock() defer f.mu.Unlock() - f.branch. - groupKind(id.GroupKind()). - namespace(id.ObjectKey().Namespace). 
- setName(id.ObjectKey().Name, checksumPath) + // Get the versioned cache + cache := f.versionedCache(ctx) - // Create the mapping between the path and a set of IDs if it doesn't exist - _, ok := f.pathToIDs[checksumPath.Path] + // Get the set of object IDs oldPath points to + idSet, ok := cache.getIDs(oldPath) if !ok { - f.pathToIDs[checksumPath.Path] = core.NewUnversionedObjectIDSet() + logrus.Tracef("MoveFile: oldPath %q did not have any IDs", oldPath) + return false } - // Register the ID with the given path - f.pathToIDs[checksumPath.Path].Insert(id) -} + logrus.Tracef("MoveFile: idSet: %s", idSet) + + // Replace the map header; assign it the new path instead + cache.setIDs(newPath, idSet) + cache.deleteIDs(oldPath) + logrus.Tracef("MoveFile: Moved idSet from %q to %q", oldPath, newPath) -// ResetMappings replaces all mappings at once -func (f *GenericFileFinder) ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) { - f.branch = &branchImpl{} - for id, cp := range m { - f.SetMapping(ctx, id, cp) + // Move the checksum info + checksum, ok := cache.getChecksum(oldPath) + if !ok { + logrus.Error("MoveFile: Expected checksum to be available, but wasn't") + // if this happens; newPath won't be mapped to any checksum; but nothing worse } + cache.setChecksum(newPath, checksum) + cache.setChecksum(oldPath, "") + logrus.Tracef("MoveFile: Moved checksum from %q to %q", oldPath, newPath) + + // Move the leveled-references of all IDs from the old to the new path + _ = idSet.ForEach(func(id core.UnversionedObjectID) error { + cache.getID(id).set(newPath) + return nil + }) + return true } -// DeleteMapping removes the physical file path mapping -// matching the given id -func (f *GenericFileFinder) DeleteMapping(ctx context.Context, id core.UnversionedObjectID) { +// SetMapping sets all the IDs that are stored in this path, for the given, updated checksum. 
+// ids must be the exact set of ObjectIDs that are observed at the given path; the previously-stored +// list will be overwritten. The new checksum will be recorded in the system for this path. +// The "added" set will record what IDs didn't exist before and were added. "duplicates" are IDs that +// were technically added, but already existed, mapped to other files in the system. Other files' +// mappings aren't removed in this function, but no new duplicates are added to this path. +// Instead such duplicates are returned instead. "removed" contains the set of IDs that existed +// previously, but were now removed. +// If ids is an empty set; all mappings to the given path will be removed, and "removed" will contain +// all prior mappings. (In fact, this is what DeleteMapping does.) +// +// ID sets are computed as follows (none of the sets overlap with each other): +// +// {ids} => {added} + {duplicates} + {removed} + {modified} +// +// {oldIDs} - {removed} + {added} => {newIDs} +func (f *GenericFileFinder) SetMapping(ctx context.Context, state ChecksumPath, newIDs core.UnversionedObjectIDSet) (added, duplicates, removed core.UnversionedObjectIDSet) { + // Lock for writing f.mu.Lock() defer f.mu.Unlock() - cp, ok := f.getMapping(ctx, id) - if !ok { - // Nothing to delete if it doesn't exist yet - return + return f.setIDsAtPath(f.versionedCache(ctx), state.Path, state.Checksum, newIDs) +} + +// internal method; not using any mutex; caller's responsibility +func (f *GenericFileFinder) setIDsAtPath(cache versionRef, path, checksum string, newIDs core.UnversionedObjectIDSet) (added, duplicates, removed core.UnversionedObjectIDSet) { + // Enforce an empty checksum for an empty newIDs + if newIDs.Len() == 0 { + checksum = "" } - // Delete it from the cache - f.branch. - groupKind(id.GroupKind()). - namespace(id.ObjectKey().Namespace). 
- deleteName(id.ObjectKey().Name) - // Delete the related ID from the path mapping too - f.pathToIDs[cp.Path].Delete(id) - // If the length of the set was shrunk to zero; delete it from the map completely - if f.pathToIDs[cp.Path].Len() == 0 { - delete(f.pathToIDs, cp.Path) + // Update the checksum. If len(checksum) == 0 this will delete the mapping + cache.setChecksum(path, checksum) + + // Get the old IDs; and compute the different "buckets" + oldIDs, _ := cache.getIDs(path) + logrus.Tracef("setIDsAtPath: oldIDs: %s", oldIDs) + // Get newID entries that are not present in oldIDs + added = newIDs.Difference(oldIDs) + logrus.Tracef("setIDsAtPath: added: %s", added) + + duplicates = core.NewUnversionedObjectIDSet() + + // Get oldIDs entries that are not present in newIDs + removed = oldIDs.Difference(newIDs) + logrus.Tracef("setIDsAtPath: removed: %s", removed) + + // Register the added items in the layered cache + _ = added.ForEach(func(addedID core.UnversionedObjectID) error { + n := cache.getID(addedID) + // Check if this name already exists somewhere else + otherPath, ok := n.get() + if ok && otherPath != path { + // If so; it is a duplicate; move it to duplicates + added.Delete(addedID) + duplicates.Insert(addedID) + return nil + } + // If it didn't exist somewhere else, add the mapping between this ID and path + n.set(path) + return nil + }) + + logrus.Tracef("setIDsAtPath: added post-filter: %s", added) + logrus.Tracef("setIDsAtPath: duplicates post-filter: %s", duplicates) + + // Remove the removed items from the layered cache + _ = removed.ForEach(func(removedID core.UnversionedObjectID) error { + cache.getID(removedID).delete() + return nil + }) + + // Finally, update the map from path to a set of IDs. + // Do not include the duplicates. We MUST NOT mutate the calling parameter. 
+ finalIDs := newIDs.Copy().DeleteSet(duplicates) + logrus.Tracef("setIDsAtPath: finalIDs: %s", finalIDs) + cache.setIDs(path, finalIDs) + + // return the different buckets + return added, duplicates, removed +} + +// DeleteMapping removes a mapping for a given path to a file. Previously-stored IDs are returned. +func (f *GenericFileFinder) DeleteMapping(ctx context.Context, path string) (removed core.UnversionedObjectIDSet) { + // Lock for writing + f.mu.Lock() + defer f.mu.Unlock() + + // Re-use the setMappings internal function + _, _, removed = f.setIDsAtPath( + f.versionedCache(ctx), // Get the versioned cache + path, // Delete mappings at this path + "", // No checksum -> delete that mapping + core.NewUnversionedObjectIDSet(), // Empty "desired state" -> everything removed + ) + return +} + +// ResetMappings removes all prior data and sets all given mappings at once. +// Duplicates are NOT stored in the cache at all for this operation, instead they are returned. +func (f *GenericFileFinder) ResetMappings(ctx context.Context, m map[ChecksumPath]core.UnversionedObjectIDSet) (duplicates core.UnversionedObjectIDSet) { + f.mu.Lock() + defer f.mu.Unlock() + + // Completely clean up all existing data on the branch before starting. + cache := f.cache.cleanVersionRef(core.GetVersionRef(ctx).Branch()) + logrus.Trace("ResetMappings: cleaned branch") + + // Keep track of all duplicates there are in the mappings + duplicates = core.NewUnversionedObjectIDSet() + + // Go through all files and add them to the cache + for cp, allIDs := range m { + // The first "duplicate" entry will succeed in "making it" to the cache; but all the others + // will be registered here. After this iteration of set; remove the duplicates completely + // from the cache. + logrus.Tracef("ResetMappings: cp %v, allIDs: %s", cp, allIDs) + + // Re-use the internal setMappings function again. 
+ // We don't need added & removed here, as we know that {allIDs} = {added} + {newDuplicates} + // Removals is always empty as we cleaned all mappings before we started this method. + _, newDuplicates, _ := f.setIDsAtPath(cache, cp.Path, cp.Checksum, allIDs) + logrus.Tracef("ResetMappings: newDuplicates: %s", newDuplicates) + // Add all duplicates together so we can process them later + duplicates.InsertSet(newDuplicates) } + + logrus.Tracef("ResetMappings: total duplicates: %s", duplicates) + + // Go and "fix up" (i.e. delete) the duplicates that were wrongly added previously + // In the resulting mappings; no duplicates are allowed (to avoid "races" at random + // between different duplicates otherwise) + _ = duplicates.ForEach(func(id core.UnversionedObjectID) error { + // Get the ID mapping so we get to know the underlying path + n := cache.getID(id) + duplicatePath, _ := n.get() + // Delete the ID mapping for that path + n.delete() + // Delete the ID also from the other map + cache.rawIDs()[duplicatePath].Delete(id) + return nil + }) + + return } diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go index 66396d13..a1994818 100644 --- a/pkg/storage/filesystem/unstructured/interfaces.go +++ b/pkg/storage/filesystem/unstructured/interfaces.go @@ -12,15 +12,14 @@ import ( // of Storage. It uses an ObjectRecognizer to recognize // otherwise unknown objects in unstructured files. // The Storage must use a unstructured.FileFinder underneath. -// -// Multiple Objects in the same file, or multiple Objects with the -// same ID in multiple files are not supported. type Storage interface { filesystem.Storage - // Sync synchronizes the current state of the filesystem with the - // cached mappings in the unstructured.FileFinder. 
- Sync(ctx context.Context) ([]ChecksumPathID, error) + // Sync synchronizes the current state of the filesystem, and overwrites all + // previously cached mappings in the unstructured.FileFinder. "successful" + // mappings returned are those that are observed to be distinct. "duplicates" + // contains such IDs that weren't distinct; but existed in multiple files. + Sync(ctx context.Context) (successful, duplicates core.UnversionedObjectIDSet, err error) // ObjectRecognizer returns the underlying ObjectRecognizer used. ObjectRecognizer() ObjectRecognizer @@ -34,28 +33,50 @@ type Storage interface { type ObjectRecognizer interface { // RecognizeObjectIDs returns the ObjectIDs present in the file with the given name, // content type and content (in the FrameReader). - RecognizeObjectIDs(fileName string, fr serializer.FrameReader) (core.ObjectIDSet, error) + RecognizeObjectIDs(fileName string, fr serializer.FrameReader) ([]core.ObjectID, error) } // FileFinder is an extension to filesystem.FileFinder that allows it to have an internal -// cache with mappings between UnversionedObjectID and a ChecksumPath. This allows +// cache with mappings between an UnversionedObjectID and a ChecksumPath. This allows // higher-order interfaces to manage Objects in files in an unorganized directory // (e.g. a Git repo). // -// Multiple Objects in the same file, or multiple Objects with the -// same ID in multiple files are not supported. +// This implementation supports multiple IDs per file, and can deal with duplicate IDs across +// distinct file paths. This implementation supports looking at the context for VersionRef info. type FileFinder interface { filesystem.FileFinder - // GetMapping retrieves a mapping in the system. - GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) - // SetMapping binds an ID to a physical file path. This operation overwrites - // any previous mapping for id. 
- SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) - // ResetMappings replaces all mappings at once to the ones in m. - ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) - // DeleteMapping removes the mapping for the given id. - DeleteMapping(ctx context.Context, id core.UnversionedObjectID) + // SetMapping sets all the IDs that are stored in this path, for the given, updated checksum. + // ids must be the exact set of ObjectIDs that are observed at the given path; the previously-stored + // list will be overwritten. The new checksum will be recorded in the system for this path. + // The "added" set will record what IDs didn't exist before and were added. "duplicates" are IDs that + // were technically added, but already existed, mapped to other files in the system. Other files' + // mappings aren't removed in this function, but no new duplicates are added to this path. + // Instead such duplicates are returned instead. "removed" contains the set of IDs that existed + // previously, but were now removed. + // If ids is an empty set; all mappings to the given path will be removed, and "removed" will contain + // all prior mappings. (In fact, this is what DeleteMapping does.) + // + // ID sets are computed as follows (none of the sets overlap with each other): + // + // {ids} => {added} + {duplicates} + {removed} + {modified} + // + // {oldIDs} - {removed} + {added} => {newIDs} + SetMapping(ctx context.Context, state ChecksumPath, ids core.UnversionedObjectIDSet) (added, duplicates, removed core.UnversionedObjectIDSet) + + // ResetMappings removes all prior data and sets all given mappings at once. + // Duplicates are NOT stored in the cache at all for this operation, instead they are returned. + ResetMappings(ctx context.Context, mappings map[ChecksumPath]core.UnversionedObjectIDSet) (duplicates core.UnversionedObjectIDSet) + + // DeleteMapping removes a mapping for a given path to a file. 
Previously-stored IDs are returned. + DeleteMapping(ctx context.Context, path string) (removed core.UnversionedObjectIDSet) + + // ChecksumForPath retrieves the latest known checksum for the given path. + ChecksumForPath(ctx context.Context, path string) (string, bool) + + // MoveFile moves an internal mapping from oldPath to newPath. moved == true if the oldPath + // existed and hence the move was performed. + MoveFile(ctx context.Context, oldPath, newPath string) (moved bool) } // ChecksumPath is a tuple of a given Checksum and relative file Path, @@ -76,8 +97,3 @@ type ChecksumPath struct { // Path to the file, relative to filesystem.Filesystem.RootDirectory(). Path string } - -type ChecksumPathID struct { - ChecksumPath - ID core.ObjectID -} diff --git a/pkg/storage/filesystem/unstructured/mapped_cache.go b/pkg/storage/filesystem/unstructured/mapped_cache.go index 08aeb835..9b1d1351 100644 --- a/pkg/storage/filesystem/unstructured/mapped_cache.go +++ b/pkg/storage/filesystem/unstructured/mapped_cache.go @@ -1,50 +1,186 @@ package unstructured -import "github.com/weaveworks/libgitops/pkg/storage/core" +import ( + "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/storage/core" +) // This file contains a set of private interfaces and implementations // that allows caching mappings between a core.UnversionedObjectID -// and a ChecksumPath. +// and paths & checksums. -// TODO: rename this interface -type branch interface { - groupKind(core.GroupKind) groupKind - raw() map[core.GroupKind]groupKind +// The point of having these interfaces in front the tree of maps is to +// lazy-initialize the maps only when needed, but without having to +// write if-then clauses all over the code. + +// NOTE: There are no mutexes in these interfaces, it is up to the caller +// to guard these for reading and writing. 
+ +type objectIDCache interface { + // looks up the versionRef interface for the given key + versionRef(ref string) versionRef + // cleans all existing data on the versionRef, and returns a new, empty one + cleanVersionRef(ref string) versionRef +} + +type versionRef interface { + // looks up the groupKind interface for the given key + groupKind(gk core.GroupKind) groupKind + // shorthand to look up the interfaces all the way to the + // name interface all at once for the given ID + getID(id core.UnversionedObjectID) name + + // used to find all the IDs cached at a certain path + getIDs(path string) (core.UnversionedObjectIDSet, bool) + // used to overwrite the ID cache for a certain path + // If ids.Len() == 0; this is effectively a deleteIDs(path) + setIDs(path string, ids core.UnversionedObjectIDSet) + // deletes the ID cache for a certain path + deleteIDs(path string) + // returns the underlying path -> ID map for custom operations + rawIDs() map[string]core.UnversionedObjectIDSet + + // gets the checksum for the given path + getChecksum(path string) (string, bool) + // sets the checksum for the given path + // if len(checksum) == 0; this is deletes the checksum path key + setChecksum(path, checksum string) } type groupKind interface { + // looks up the namespace interface for the given key namespace(string) namespace + // raw returns the underlying map used; can be used for listing raw() map[string]namespace } type namespace interface { - name(string) (ChecksumPath, bool) - setName(string, ChecksumPath) - deleteName(string) - raw() map[string]ChecksumPath + // looks up the name interface for the given key + name(name string) name + // raw returns the underlying map used; can be used for listing + raw() map[string]string +} + +type name interface { + // gets the path for the given ID (given while traversing here) + get() (string, bool) + // sets the path for the given ID (given while traversing here) + set(path string) + // deletes the given ID's mapping to a 
path + delete() +} + +type objectIDCacheImpl struct { + versionRefs map[string]versionRef +} + +func (c *objectIDCacheImpl) versionRef(b string) versionRef { + if c.versionRefs == nil { + c.versionRefs = make(map[string]versionRef) + } + val, ok := c.versionRefs[b] + if !ok { + val = &versionRefImpl{} + c.versionRefs[b] = val + } + return val +} + +func (c *objectIDCacheImpl) cleanVersionRef(b string) versionRef { + if c.versionRefs == nil { + c.versionRefs = make(map[string]versionRef) + } + delete(c.versionRefs, b) + c.versionRefs[b] = &versionRefImpl{} + return c.versionRefs[b] } -type branchImpl struct { - m map[core.GroupKind]groupKind +type versionRefImpl struct { + // gkToNamespace maps the objectID hierarchy to a path + gkToNamespace map[core.GroupKind]groupKind + // pathToIDs maps a path to a set of IDs in that file + pathToIDs map[string]core.UnversionedObjectIDSet + // pathChecksums maps a path to a checksum + pathChecksums map[string]string } -func (b *branchImpl) groupKind(gk core.GroupKind) groupKind { - if b.m == nil { - b.m = make(map[core.GroupKind]groupKind) +func (b *versionRefImpl) groupKind(gk core.GroupKind) groupKind { + if b.gkToNamespace == nil { + b.gkToNamespace = make(map[core.GroupKind]groupKind) } - val, ok := b.m[gk] + val, ok := b.gkToNamespace[gk] if !ok { val = &groupKindImpl{} - b.m[gk] = val + b.gkToNamespace[gk] = val } return val } -func (b *branchImpl) raw() map[core.GroupKind]groupKind { - if b.m == nil { - b.m = make(map[core.GroupKind]groupKind) +func (b *versionRefImpl) getID(id core.UnversionedObjectID) name { + return b.groupKind(id.GroupKind()).namespace(id.ObjectKey().Namespace).name(id.ObjectKey().Name) +} + +func (b *versionRefImpl) getIDs(path string) (core.UnversionedObjectIDSet, bool) { + if b.pathToIDs == nil { + b.pathToIDs = make(map[string]core.UnversionedObjectIDSet) + } + val, ok := b.pathToIDs[path] + if !ok { + // always return a non-nil set + val = core.NewUnversionedObjectIDSet() + } + return val, ok +} 
+ +func (b *versionRefImpl) setIDs(path string, ids core.UnversionedObjectIDSet) { + if b.pathToIDs == nil { + b.pathToIDs = make(map[string]core.UnversionedObjectIDSet) + } + // Delete if empty, otherwise set. + if ids.Len() == 0 { + logrus.Tracef("setIDs: Deleting pathToIDs[%s]", path) + delete(b.pathToIDs, path) + } else { + logrus.Tracef("setIDs: Setting pathToIDs[%s] = %s", path, ids) + b.pathToIDs[path] = ids + } +} + +func (b *versionRefImpl) rawIDs() map[string]core.UnversionedObjectIDSet { + if b.pathToIDs == nil { + b.pathToIDs = make(map[string]core.UnversionedObjectIDSet) + } + return b.pathToIDs +} + +func (b *versionRefImpl) deleteIDs(path string) { + if b.pathToIDs == nil { + b.pathToIDs = make(map[string]core.UnversionedObjectIDSet) + } + logrus.Tracef("deleteIDs: Deleting pathToIDs[%s]", path) + delete(b.pathToIDs, path) +} + +func (b *versionRefImpl) getChecksum(path string) (string, bool) { + if b.pathChecksums == nil { + b.pathChecksums = make(map[string]string) + } + val, ok := b.pathChecksums[path] + return val, ok +} + +func (b *versionRefImpl) setChecksum(path, checksum string) { + if b.pathChecksums == nil { + b.pathChecksums = make(map[string]string) + } + // Delete if empty, otherwise set. 
+ if len(checksum) == 0 { + logrus.Tracef("setChecksum: Deleting pathChecksums[%s]", path) + delete(b.pathChecksums, path) + } else { + logrus.Tracef("setChecksum: Setting pathChecksums[%s] = %s", path, checksum) + b.pathChecksums[path] = checksum } - return b.m } type groupKindImpl struct { @@ -71,34 +207,39 @@ func (g *groupKindImpl) raw() map[string]namespace { } type namespaceImpl struct { - m map[string]ChecksumPath + m map[string]string } -func (n *namespaceImpl) name(name string) (ChecksumPath, bool) { +func (n *namespaceImpl) name(name string) name { if n.m == nil { - n.m = make(map[string]ChecksumPath) + n.m = make(map[string]string) } - cp, ok := n.m[name] - return cp, ok + return &nameImpl{&n.m, name} } -func (n *namespaceImpl) setName(name string, cp ChecksumPath) { +func (n *namespaceImpl) raw() map[string]string { if n.m == nil { - n.m = make(map[string]ChecksumPath) + n.m = make(map[string]string) } - n.m[name] = cp + return n.m } -func (n *namespaceImpl) deleteName(name string) { - if n.m == nil { - n.m = make(map[string]ChecksumPath) - } - delete(n.m, name) +type nameImpl struct { + parentM *map[string]string + name string } -func (n *namespaceImpl) raw() map[string]ChecksumPath { - if n.m == nil { - n.m = make(map[string]ChecksumPath) - } - return n.m +func (n *nameImpl) get() (string, bool) { + path, ok := (*n.parentM)[n.name] + return path, ok +} + +func (n *nameImpl) set(path string) { + logrus.Tracef("name.set: Setting namespace.m[%s] = %s", n.name, path) + (*n.parentM)[n.name] = path +} + +func (n *nameImpl) delete() { + logrus.Tracef("name.delete: Deleting namespace.m[%s]", n.name) + delete((*n.parentM), n.name) } diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go index 5f30d9a4..57f25769 100644 --- a/pkg/storage/filesystem/unstructured/storage.go +++ b/pkg/storage/filesystem/unstructured/storage.go @@ -11,9 +11,6 @@ import ( "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) 
-// ErrOnlySingleFrameSupported tells that only single frame-files are supported so far for the unstructured Storage. -var ErrOnlySingleFrameSupported = errors.New("file contains multiple Objects; for now only single-frame files are supported") - func NewGeneric(storage filesystem.Storage, recognizer ObjectRecognizer, pathExcluder filesystem.PathExcluder) (Storage, error) { if storage == nil { return nil, fmt.Errorf("storage is mandatory") @@ -40,71 +37,54 @@ type Generic struct { pathExcluder filesystem.PathExcluder } -// Sync synchronizes the current state of the filesystem with the -// cached mappings in the underlying unstructured.FileFinder. -func (s *Generic) Sync(ctx context.Context) ([]ChecksumPathID, error) { +// Sync synchronizes the current state of the filesystem, and overwrites all +// previously cached mappings in the unstructured.FileFinder. "successful" +// mappings returned are those that are observed to be distinct. "duplicates" +// contains such IDs that weren't distinct; but existed in multiple files. 
+func (s *Generic) Sync(ctx context.Context) (successful, duplicates core.UnversionedObjectIDSet, err error) { fileFinder := s.UnstructuredFileFinder() + fs := fileFinder.Filesystem() + contentTyper := fileFinder.ContentTyper() // List all valid files in the fs files, err := filesystem.ListValidFilesInFilesystem( ctx, - fileFinder.Filesystem(), - fileFinder.ContentTyper(), + fs, + contentTyper, s.PathExcluder(), ) if err != nil { - return nil, err + return nil, nil, err } - // Send SYNC events for all files (and fill the mappings - // of the unstructured.FileFinder) before starting to monitor changes - updatedFiles := make([]ChecksumPathID, 0, len(files)) - for _, filePath := range files { - // Get the current checksum of the file - currentChecksum, err := fileFinder.Filesystem().Checksum(ctx, filePath) - if err != nil { - logrus.Errorf("Could not get checksum for file %q: %v", filePath, err) - continue - } - - // If the given file already is tracked; i.e. has a mapping with a - // non-empty checksum, and the current checksum matches, we do not - // need to do anything. - if id, err := SingleObjectAt(ctx, fileFinder, filePath); err == nil { - if cp, ok := fileFinder.GetMapping(ctx, id); ok && len(cp.Checksum) != 0 { - if cp.Checksum == currentChecksum { - logrus.Tracef("Checksum for file %q is up-to-date: %q, skipping...", filePath, cp.Checksum) - continue - } - } - } + // Walk all files and fill the mappings of the unstructured.FileFinder. 
+ allMappings := make(map[ChecksumPath]core.UnversionedObjectIDSet) + objectCount := 0 - // Read and recognize the file - id, err := ReadAndRecognizeFile( - ctx, - fileFinder.Filesystem(), - fileFinder.ContentTyper(), - s.recognizer, - filePath, - ) + for _, filePath := range files { + // Recognize the IDs in all the given file + idSet, cp, _, err := RecognizeIDsInFile(ctx, fileFinder, s.ObjectRecognizer(), filePath) if err != nil { - logrus.Warn(err) + logrus.Error(err) continue } + objectCount += idSet.Len() + allMappings[*cp] = idSet + } - // Add a mapping between this object and path - cp := ChecksumPath{ - Checksum: currentChecksum, - Path: filePath, - } - fileFinder.SetMapping(ctx, id, cp) - // Add to the slice which we'll return - updatedFiles = append(updatedFiles, ChecksumPathID{ - ChecksumPath: cp, - ID: id, - }) + // ResetMappings overwrites all data at once; so these + // mappings are now the "truth" about what's on disk + // Duplicate mappings are returned from ResetMappings + duplicates = fileFinder.ResetMappings(ctx, allMappings) + // Create an empty set for the "successful" IDs + successful = core.NewUnversionedObjectIDSet() + // For each set of IDs; add them to the "successful" batch + for _, set := range allMappings { + successful.InsertSet(set) } - return updatedFiles, nil + // Remove the duplicates from the successful bucket + successful.DeleteSet(duplicates) + return } // ObjectRecognizer returns the underlying ObjectRecognizer used. @@ -122,53 +102,58 @@ func (s *Generic) UnstructuredFileFinder() FileFinder { return s.fileFinder } -// ReadAndRecognizeFile reads the given file and its content type; and then recognizes it. -// It only supports one ObjectID per file at the moment. -func ReadAndRecognizeFile( +// RecognizeIDsInFile reads the given file and its content type; and then recognizes it. +// However, if the checksum is already up-to-date, the function returns directly, without +// reading the file. 
In that case, the bool is true (in all other cases, false). The +// ObjectIDSet and ChecksumPath are returned when err == nil. +func RecognizeIDsInFile( ctx context.Context, - fs filesystem.Filesystem, - contentTyper filesystem.ContentTyper, + fileFinder FileFinder, recognizer ObjectRecognizer, filePath string, -) (core.ObjectID, error) { +) (core.UnversionedObjectIDSet, *ChecksumPath, bool, error) { + fs := fileFinder.Filesystem() + contentTyper := fileFinder.ContentTyper() + + // Get the current checksum of the file + currentChecksum, err := fs.Checksum(ctx, filePath) + if err != nil { + return nil, nil, false, fmt.Errorf("Could not get checksum for file %q: %v", filePath, err) + } + cp := &ChecksumPath{Path: filePath, Checksum: currentChecksum} + + // Check the cached checksum + cachedChecksum, ok := fileFinder.ChecksumForPath(ctx, filePath) + if ok && cachedChecksum == currentChecksum { + // If the cache is up-to-date, we don't need to do anything + logrus.Tracef("Checksum for file %q is up-to-date: %q, skipping...", filePath, currentChecksum) + // Just get the IDs that are cached, and done. + idSet, err := fileFinder.ObjectsAt(ctx, filePath) + if err != nil { + return nil, nil, false, err + } + return idSet, cp, true, nil + } + // If the file is not known to the FileFinder yet, or if the checksum // was empty, read the file, and recognize it. 
content, err := fs.ReadFile(ctx, filePath) if err != nil { - return nil, fmt.Errorf("Could not read file %q: %v", filePath, err) + return nil, nil, false, fmt.Errorf("Could not read file %q: %v", filePath, err) } // Get the content type for this file so that we can read it properly ct, err := contentTyper.ContentTypeForPath(ctx, fs, filePath) if err != nil { - return nil, fmt.Errorf("Could not get content type for file %q: %v", filePath, err) + return nil, nil, false, fmt.Errorf("Could not get content type for file %q: %v", filePath, err) } // TODO: In the future this NewFrameReader should come from an interface, not // directly from the hard-coded serializer package. fr := serializer.NewFrameReader(ct, serializer.FromBytes(content)) // Recognize all IDs in the file - ids, err := recognizer.RecognizeObjectIDs(filePath, fr) + versionedIDs, err := recognizer.RecognizeObjectIDs(filePath, fr) if err != nil { - return nil, fmt.Errorf("Could not recognize object IDs in %q: %v", filePath, err) - } - // For now; we only support single-frame files - // TODO: Change this. - if ids.Len() != 1 { - return nil, fmt.Errorf("%w: %q", ErrOnlySingleFrameSupported, filePath) - } - // Return that one ID - return ids.List()[0], nil -} - -func SingleObjectAt(ctx context.Context, fileFinder filesystem.FileFinder, filePath string) (core.UnversionedObjectID, error) { - idSet, err := fileFinder.ObjectsAt(ctx, filePath) - if err != nil { - return nil, err - } - // For now; we only support single-frame files - // TODO: Change this. 
- if idSet.Len() != 1 { - return nil, fmt.Errorf("%w: %q", ErrOnlySingleFrameSupported, filePath) + return nil, nil, false, fmt.Errorf("Could not recognize object IDs in %q: %v", filePath, err) } - // Return that one ID - return idSet.List()[0], nil + // Convert to an unversioned set + return core.UnversionedObjectIDSetFromVersionedSlice(versionedIDs), cp, false, nil } From 40769355782907f83c9b0abb6f71105bca995935 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 03:02:06 +0200 Subject: [PATCH 120/149] Allow the user to plug in their wanted FrameReaderFactory for reading custom formats. --- pkg/serializer/frame_reader.go | 35 +++++++++++-- pkg/storage/filesystem/dir_traversal.go | 3 +- .../filesystem/unstructured/event/storage.go | 11 ++++- .../filesystem/unstructured/interfaces.go | 4 +- .../filesystem/unstructured/storage.go | 49 ++++++++++++++----- 5 files changed, 81 insertions(+), 21 deletions(-) diff --git a/pkg/serializer/frame_reader.go b/pkg/serializer/frame_reader.go index a2ba308d..0b3af687 100644 --- a/pkg/serializer/frame_reader.go +++ b/pkg/serializer/frame_reader.go @@ -38,11 +38,22 @@ type FrameReader interface { ReadFrame() ([]byte, error) } -// NewFrameReader returns a FrameReader for the given ContentType and data in the -// ReadCloser. The Reader is automatically closed in io.EOF. ReadFrame is called -// once each Decoder.Decode() or Decoder.DecodeInto() call. When Decoder.DecodeAll() is -// called, the FrameReader is read until io.EOF, upon where it is closed. -func NewFrameReader(contentType ContentType, rc ReadCloser) FrameReader { +// FrameReaderFactory knows how to create various different FrameReaders for +// given ContentTypes. +type FrameReaderFactory interface { + // NewFrameReader returns a new FrameReader for the given ContentType, + // and ReadCloser that contains the underlying data that should be read. 
+ NewFrameReader(contentType ContentType, rc ReadCloser) FrameReader +} + +// defaultFrameReaderFactory is the variable used in public methods. +var defaultFrameReaderFactory FrameReaderFactory = frameReaderFactory{} + +// frameReaderFactory is the default implementation of FrameReaderFactory. +type frameReaderFactory struct{} + +// Documentation below attached to NewFrameReader. +func (frameReaderFactory) NewFrameReader(contentType ContentType, rc ReadCloser) FrameReader { switch contentType { case ContentTypeYAML: return newFrameReader(json.YAMLFramer.NewFrameReader(rc), contentType) @@ -53,6 +64,20 @@ func NewFrameReader(contentType ContentType, rc ReadCloser) FrameReader { } } +// NewFrameReaderFactory returns the default variant of FrameReaderFactory capable +// of creating YAML- and JSON-compatible FrameReaders. +func NewFrameReaderFactory() FrameReaderFactory { + return frameReaderFactory{} +} + +// NewFrameReader returns a FrameReader for the given ContentType and data in the +// ReadCloser. The Reader is automatically closed in io.EOF. ReadFrame is called +// once each Decoder.Decode() or Decoder.DecodeInto() call. When Decoder.DecodeAll() is +// called, the FrameReader is read until io.EOF, upon where it is closed. +func NewFrameReader(contentType ContentType, rc ReadCloser) FrameReader { + return defaultFrameReaderFactory.NewFrameReader(contentType, rc) +} + // NewYAMLFrameReader returns a FrameReader that supports both YAML and JSON. Frames are separated by "---\n" // // This call is the same as NewFrameReader(ContentTypeYAML, rc) diff --git a/pkg/storage/filesystem/dir_traversal.go b/pkg/storage/filesystem/dir_traversal.go index 12284d71..31ec7f1d 100644 --- a/pkg/storage/filesystem/dir_traversal.go +++ b/pkg/storage/filesystem/dir_traversal.go @@ -27,7 +27,8 @@ func ListValidFilesInFilesystem(ctx context.Context, fs Filesystem, contentTyper // that contentTyper recognizes, and is not a path that is excluded by pathExcluder. 
func IsValidFileInFilesystem(ctx context.Context, fs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder, file string) bool { // return false if this path should be excluded - if pathExcluder.ShouldExcludePath(file) { + // pathExcluder can be nil; watch out for that + if pathExcluder != nil && pathExcluder.ShouldExcludePath(file) { return false } diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index fa8a9c06..c060e578 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -6,6 +6,7 @@ import ( gosync "sync" "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/event" @@ -49,7 +50,7 @@ func NewManifest( if err != nil { return nil, err } - unstructuredRaw, err := unstructured.NewGeneric(fsRaw, recognizer, pathExcluder) + unstructuredRaw, err := unstructured.NewGeneric(fsRaw, recognizer, pathExcluder, serializer.NewFrameReaderFactory()) if err != nil { return nil, err } @@ -285,7 +286,13 @@ func (s *Generic) handleModifyMove(ctx context.Context, ev *fileevents.FileEvent } // Recognize the contents of the file - idSet, cp, alreadyCached, err := unstructured.RecognizeIDsInFile(ctx, fileFinder, s.ObjectRecognizer(), ev.Path) + idSet, cp, alreadyCached, err := unstructured.RecognizeIDsInFile( + ctx, + fileFinder, + s.ObjectRecognizer(), + s.FrameReaderFactory(), + ev.Path, + ) if err != nil { return err } diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go index a1994818..6b09e548 100644 --- a/pkg/storage/filesystem/unstructured/interfaces.go +++ b/pkg/storage/filesystem/unstructured/interfaces.go @@ -23,7 +23,9 @@ type Storage interface { // ObjectRecognizer returns the underlying 
ObjectRecognizer used. ObjectRecognizer() ObjectRecognizer - // PathExcluder specifies what paths to not sync + // FrameReaderFactory returns the underlying FrameReaderFactory used. + FrameReaderFactory() serializer.FrameReaderFactory + // PathExcluder specifies what paths to not sync. Can possibly be nil. PathExcluder() filesystem.PathExcluder // UnstructuredFileFinder returns the underlying unstructured.FileFinder used. UnstructuredFileFinder() FileFinder diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go index 57f25769..c0b20702 100644 --- a/pkg/storage/filesystem/unstructured/storage.go +++ b/pkg/storage/filesystem/unstructured/storage.go @@ -11,30 +11,44 @@ import ( "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) -func NewGeneric(storage filesystem.Storage, recognizer ObjectRecognizer, pathExcluder filesystem.PathExcluder) (Storage, error) { +// NewGeneric creates a new generic unstructured.Storage for the given underlying +// interfaces. storage and recognizer are mandatory, pathExcluder and framingFactory +// are optional (can be nil). framingFactory defaults to serializer.NewFrameReaderFactory(). +func NewGeneric( + storage filesystem.Storage, + recognizer ObjectRecognizer, + pathExcluder filesystem.PathExcluder, + framingFactory serializer.FrameReaderFactory, +) (Storage, error) { if storage == nil { return nil, fmt.Errorf("storage is mandatory") } if recognizer == nil { return nil, fmt.Errorf("recognizer is mandatory") } + // optional: use YAML/JSON by default. 
+ if framingFactory == nil { + framingFactory = serializer.NewFrameReaderFactory() + } fileFinder, ok := storage.FileFinder().(FileFinder) if !ok { return nil, errors.New("the given filesystem.Storage must use a unstructured.FileFinder") } return &Generic{ - Storage: storage, - recognizer: recognizer, - fileFinder: fileFinder, - pathExcluder: pathExcluder, + Storage: storage, + recognizer: recognizer, + fileFinder: fileFinder, + pathExcluder: pathExcluder, + framingFactory: framingFactory, }, nil } type Generic struct { filesystem.Storage - recognizer ObjectRecognizer - fileFinder FileFinder - pathExcluder filesystem.PathExcluder + recognizer ObjectRecognizer + fileFinder FileFinder + pathExcluder filesystem.PathExcluder + framingFactory serializer.FrameReaderFactory } // Sync synchronizes the current state of the filesystem, and overwrites all @@ -63,7 +77,13 @@ func (s *Generic) Sync(ctx context.Context) (successful, duplicates core.Unversi for _, filePath := range files { // Recognize the IDs in all the given file - idSet, cp, _, err := RecognizeIDsInFile(ctx, fileFinder, s.ObjectRecognizer(), filePath) + idSet, cp, _, err := RecognizeIDsInFile( + ctx, + fileFinder, + s.ObjectRecognizer(), + s.FrameReaderFactory(), + filePath, + ) if err != nil { logrus.Error(err) continue @@ -92,6 +112,11 @@ func (s *Generic) ObjectRecognizer() ObjectRecognizer { return s.recognizer } +// FrameReaderFactory returns the underlying FrameReaderFactory used. 
+func (s *Generic) FrameReaderFactory() serializer.FrameReaderFactory { + return s.framingFactory +} + // PathExcluder specifies what paths to not sync func (s *Generic) PathExcluder() filesystem.PathExcluder { return s.pathExcluder @@ -110,6 +135,7 @@ func RecognizeIDsInFile( ctx context.Context, fileFinder FileFinder, recognizer ObjectRecognizer, + framingFactory serializer.FrameReaderFactory, filePath string, ) (core.UnversionedObjectIDSet, *ChecksumPath, bool, error) { fs := fileFinder.Filesystem() @@ -146,9 +172,8 @@ func RecognizeIDsInFile( if err != nil { return nil, nil, false, fmt.Errorf("Could not get content type for file %q: %v", filePath, err) } - // TODO: In the future this NewFrameReader should come from an interface, not - // directly from the hard-coded serializer package. - fr := serializer.NewFrameReader(ct, serializer.FromBytes(content)) + // Create a new FrameReader for the given ContentType and ReadCloser + fr := framingFactory.NewFrameReader(ct, serializer.FromBytes(content)) // Recognize all IDs in the file versionedIDs, err := recognizer.RecognizeObjectIDs(filePath, fr) if err != nil { From cf792b08aca53536a601f4979a688645ddda0793 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 20:33:30 +0200 Subject: [PATCH 121/149] Reject expired contexts in the backend --- pkg/storage/backend/backend.go | 45 +++++++++++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index e9351642..5dbd04b9 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -68,6 +68,13 @@ type StatusWriter interface { UpdateStatus(ctx context.Context, obj Object) error } +// Backend combines the Reader and Writer interfaces for a fully-functioning backend +// implementation; used by the Client interface. Backend can be through as the "API Server" +// logic in between a "frontend" Client and "document" Storage. 
In other words, the backend +// handles serialization, versioning, validation, and policy enforcement. +// +// Any callable function should immediately abort if the given context from the client +// has expired; so an invalid context doesn't "leak down" to the Storage system. type Backend interface { Reader Writer @@ -166,6 +173,11 @@ func (b *Generic) StorageVersioner() StorageVersioner { } func (b *Generic) Get(ctx context.Context, obj Object) error { + // If the context has been cancelled or timed out; directly return an error + if err := ctx.Err(); err != nil { + return err + } + // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. id, err := b.idForObj(ctx, obj) if err != nil { @@ -192,6 +204,11 @@ func (b *Generic) Get(ctx context.Context, obj Object) error { // objects; for that the behavior is undefined (but returning an error // is recommended). func (b *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { + // If the context has been cancelled or timed out; directly return an error + if err := ctx.Err(); err != nil { + return nil, err + } + return b.storage.ListNamespaces(ctx, gk) } @@ -200,10 +217,20 @@ func (b *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.S // root-spaced GroupKinds, the caller must not. When namespaced, this function // must only return object keys for that given namespace. 
func (b *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) {
+	// If the context has been cancelled or timed out; directly return an error
+	if err := ctx.Err(); err != nil {
+		return nil, err
+	}
+
 	return b.storage.ListObjectIDs(ctx, gk, namespace)
 }
 
 func (b *Generic) Create(ctx context.Context, obj Object) error {
+	// If the context has been cancelled or timed out; directly return an error
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+
 	// We must never save metadata-only structs
 	if serializer.IsPartialObject(obj) {
 		return ErrCannotSaveMetadata
@@ -231,7 +258,12 @@ func (b *Generic) Create(ctx context.Context, obj Object) error {
 	// Internal, common write shared with Update()
 	return b.write(ctx, id, obj)
 }
-func (b *Generic) Update(ctx context.Context, obj Object) error {
+func (b *Generic) Update(ctx context.Context, obj Object) error {
+	// If the context has been cancelled or timed out; directly return an error
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+
 	// We must never save metadata-only structs
 	if serializer.IsPartialObject(obj) {
 		return ErrCannotSaveMetadata
@@ -261,6 +293,11 @@ func (b *Generic) Update(ctx context.Context, obj Object) error {
 }
 
 func (b *Generic) UpdateStatus(ctx context.Context, obj Object) error {
+	// If the context has been cancelled or timed out; directly return an error
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+
 	return core.ErrNotImplemented // TODO
 }
 
@@ -294,6 +331,11 @@ func (b *Generic) write(ctx context.Context, id core.ObjectID, obj Object) error
 }
 
 func (b *Generic) Delete(ctx context.Context, obj Object) error {
+	// If the context has been cancelled or timed out; directly return an error
+	if err := ctx.Err(); err != nil {
+		return err
+	}
+
 	// Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. 
id, err := b.idForObj(ctx, obj) if err != nil { @@ -319,6 +361,7 @@ func (b *Generic) Delete(ctx context.Context, obj Object) error { // Note: This should also work for unstructured and partial metadata objects func (b *Generic) idForObj(ctx context.Context, obj Object) (core.ObjectID, error) { + // Get the GroupVersionKind of the given object. gvk, err := serializer.GVKForObject(b.Scheme(), obj) if err != nil { return nil, err From 60c90f24a6580940f216c0249f71c4c00d7a9feb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 21:43:36 +0200 Subject: [PATCH 122/149] Unify some locking code in the txclient --- pkg/storage/client/transactional/client.go | 52 ++++++++++------------ 1 file changed, 23 insertions(+), 29 deletions(-) diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index a0d3658b..aa3db913 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -66,35 +66,38 @@ type txLock struct { } func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error { - return c.lockForReading(ctx, func() error { + return c.lockAndReadBranch(ctx, func() error { return c.c.Get(ctx, key, obj) }) } func (c *Generic) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - return c.lockForReading(ctx, func() error { + return c.lockAndReadBranch(ctx, func() error { return c.c.List(ctx, list, opts...) 
})
 }
 
-func (c *Generic) lockForReading(ctx context.Context, operation func() error) error {
-	// Get the branch from the context, and lock it
-	return c.lockAndReadBranch(core.GetVersionRef(ctx).Branch(), operation)
-}
-
-func (c *Generic) lockAndReadBranch(branch string, callback func() error) error {
-	// Use c.txsMu to guard reads and writes to the c.txs map
+func (c *Generic) getBranchLockInfo(branch string) *txLock {
+	// c.txsMu guards reads and writes to the c.txs map
 	c.txsMu.Lock()
+	defer c.txsMu.Unlock()
+	// Check if information about a transaction on this branch exists.
 	txState, ok := c.txs[branch]
-	if !ok {
-		// grow the txs map by one
-		c.txs[branch] = &txLock{
-			mu: &sync.RWMutex{},
-		}
-		txState = c.txs[branch]
+	if ok {
+		return txState
+	}
+	// if not, grow the txs map by one and return it
+	c.txs[branch] = &txLock{
+		mu: &sync.RWMutex{},
 	}
-	c.txsMu.Unlock()
+	return c.txs[branch]
+}
+
+func (c *Generic) lockAndReadBranch(ctx context.Context, callback func() error) error {
+	// Acquire the branch-specific lock
+	branch := core.GetVersionRef(ctx).Branch()
+	txState := c.getBranchLockInfo(branch)
 
 	// In the atomic mode, we lock the txLock during the read,
 	// so no new transactions can be started while the read
@@ -112,18 +115,8 @@ func (c *Generic) lockAndReadBranch(branch string, callback func() error) error
 }
 
 func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txFunc) {
-	// Aquire the tx-specific lock
-	c.txsMu.Lock()
-	txState, ok := c.txs[info.Head]
-	if !ok {
-		// grow the txs map by one
-		c.txs[info.Head] = &txLock{
-			mu: &sync.RWMutex{},
-		}
-		txState = c.txs[info.Head]
-	}
-	txState.mode = info.Options.Mode
-	c.txsMu.Unlock()
+	// Acquire the branch-specific lock
+	txState := c.getBranchLockInfo(info.Head)
 
 	// Wait for all reads to complete (in the case of the atomic more),
 	// and then lock for writing. 
For non-atomic mode this uses the mutex @@ -135,7 +128,8 @@ func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txF // on any reads happening at this moment. For all modes, this ensures // transactions happen in order. txState.mu.Lock() - txState.active = 1 // set tx state to "active" + txState.active = 1 // set tx state to "active" + txState.mode = info.Options.Mode // declare what transaction mode is used // Create a child context with a timeout dlCtx, cleanupTimeout := context.WithTimeout(ctx, info.Options.Timeout) From 2e257ae9539ccda749af65f9b6e609f4228184ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 22:16:02 +0200 Subject: [PATCH 123/149] Move the singleframereader, update it slightly, and add a singleframewriter implementation too --- pkg/serializer/frame_reader.go | 39 ---------------- pkg/serializer/frame_single.go | 84 ++++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 39 deletions(-) create mode 100644 pkg/serializer/frame_single.go diff --git a/pkg/serializer/frame_reader.go b/pkg/serializer/frame_reader.go index 0b3af687..c9fcd817 100644 --- a/pkg/serializer/frame_reader.go +++ b/pkg/serializer/frame_reader.go @@ -202,42 +202,3 @@ func FromFile(filePath string) ReadCloser { func FromBytes(content []byte) ReadCloser { return ioutil.NopCloser(bytes.NewReader(content)) } - -// NewSingleFrameReader returns a FrameReader for only a single frame of -// the specified content type. This avoids overhead if it is known that the -// byte array only contains one frame. The given frame is returned in -// whole in the first ReadFrame() call, and io.EOF is returned in all future -// invocations. This FrameReader works for any ContentType and transparently -// exposes what was given through the ContentType() method. 
-func NewSingleFrameReader(b []byte, ct ContentType) FrameReader { - return &singleFrameReader{ - ct: ct, - b: b, - hasBeenRead: false, - hasBeenReadMu: &sync.Mutex{}, - } -} - -var _ FrameReader = &singleFrameReader{} - -type singleFrameReader struct { - ct ContentType - b []byte - hasBeenRead bool - hasBeenReadMu *sync.Mutex -} - -func (r *singleFrameReader) ReadFrame() ([]byte, error) { - r.hasBeenReadMu.Lock() - defer r.hasBeenReadMu.Unlock() - // If ReadFrame() has been called once, just return io.EOF. - if r.hasBeenRead { - return nil, io.EOF - } - // The first time, mark that we've read, and return the single frame - r.hasBeenRead = true - return r.b, nil -} - -func (r *singleFrameReader) ContentType() ContentType { return r.ct } -func (r *singleFrameReader) Close() error { return nil } diff --git a/pkg/serializer/frame_single.go b/pkg/serializer/frame_single.go new file mode 100644 index 00000000..9d50da27 --- /dev/null +++ b/pkg/serializer/frame_single.go @@ -0,0 +1,84 @@ +package serializer + +import ( + "io" + "sync/atomic" +) + +// NewSingleFrameReader returns a FrameReader for only a single frame of +// the specified content type. This avoids overhead if it is known that the +// byte array only contains one frame. The given frame is returned in +// whole in the first ReadFrame() call, and io.EOF is returned in all future +// invocations. This FrameReader works for any ContentType and transparently +// exposes the given content type through the ContentType() method. +// This implementation is thread-safe. +func NewSingleFrameReader(b []byte, ct ContentType) FrameReader { + return &singleFrameReader{ + ct: ct, + b: b, + hasBeenRead: 0, + } +} + +// singleFrameReader implements the FrameReader interface. +var _ FrameReader = &singleFrameReader{} + +type singleFrameReader struct { + ct ContentType + b []byte + hasBeenRead uint32 +} + +func (r *singleFrameReader) ReadFrame() ([]byte, error) { + // The first time this function executes; hasBeenRead == 0. 
The atomic compare-and-swap + // operation checks if hasBeenRead == 0, and if so, sets it to one and returns true. + // This means that r.b will ever only be returned exactly once, as all the other cases + // (when hasBeenRead == 1), the compare-and-swap operation will return false => io.EOF. + if atomic.CompareAndSwapUint32(&r.hasBeenRead, 0, 1) { + // The first time, return the single frame we store + return r.b, nil + } + return nil, io.EOF +} + +func (r *singleFrameReader) ContentType() ContentType { return r.ct } +func (r *singleFrameReader) Close() error { return nil } + +// NewSingleFrameWriter returns a FrameWriter for only a single frame of +// the specified content type, using the underlying Writer. This FrameWriter +// will only ever write once; any successive calls will result in a io.ErrClosedPipe. +// This FrameWriter works for any ContentType and transparently exposes the given +// content type through the ContentType() method. +// This implementation is thread-safe. +func NewSingleFrameWriter(w Writer, ct ContentType) FrameWriter { + return &singleFrameWriter{ + ct: ct, + w: w, + hasBeenWritten: 0, + } +} + +// singleFrameWriter implements the FrameWriter interface. +var _ FrameWriter = &singleFrameWriter{} + +type singleFrameWriter struct { + ct ContentType + w Writer + hasBeenWritten uint32 +} + +func (r *singleFrameWriter) Write(p []byte) (n int, err error) { + // The first time this function executes; hasBeenWritten == 0. The atomic compare-and-swap + // operation checks if hasBeenWritten == 0, and if so, sets it to one and returns true. + // This means that r.b will ever only be returned exactly once, as all the other cases + // (when hasBeenWritten == 1), the compare-and-swap operation will return false => io.ErrClosedPipe. 
+ if atomic.CompareAndSwapUint32(&r.hasBeenWritten, 0, 1) { + // The first time, write to the underlying writer + n, err = r.w.Write(p) + return + } + err = io.ErrClosedPipe + return +} + +func (r *singleFrameWriter) ContentType() ContentType { return r.ct } From 58475409b46f6350b37c2e984dea35472ad6c0bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 22:16:29 +0200 Subject: [PATCH 124/149] Use the singleframewriter in the backend to support any content type. --- pkg/storage/backend/backend.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 5dbd04b9..bf0462f8 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -320,10 +320,10 @@ func (b *Generic) write(ctx context.Context, id core.ObjectID, obj Object) error } var objBytes bytes.Buffer - // TODO: Work with any ContentType, not just JSON/YAML. Make a SingleFrameWriter - // that works for any ContentType, and just ever writes one doc (which is what we need) - err = b.encoder.EncodeForGroupVersion(serializer.NewFrameWriter(ct, &objBytes), obj, gv) - if err != nil { + // This FrameWriter works for any content type; and transparently writes to objBytes + fw := serializer.NewSingleFrameWriter(&objBytes, ct) + // The encoder is set to use the given ContentType through fw; and encodes obj. + if err := b.encoder.EncodeForGroupVersion(fw, obj, gv); err != nil { return err } From e8949ffa2e6a37cc878b648a7e3719107168f432 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Fri, 5 Feb 2021 22:29:01 +0200 Subject: [PATCH 125/149] Implement ListGroupKinds in storage.Lister across the system. 
--- pkg/storage/backend/backend.go | 15 ++++++++++ pkg/storage/filesystem/filefinder_simple.go | 30 +++++++++++++++++++ pkg/storage/filesystem/storage.go | 11 +++++++ .../unstructured/filefinder_mapped.go | 15 ++++++++++ .../filesystem/unstructured/mapped_cache.go | 9 ++++++ pkg/storage/interfaces.go | 8 +++++ 6 files changed, 88 insertions(+) diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index bf0462f8..0021f0f1 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -198,6 +198,21 @@ func (b *Generic) Get(ctx context.Context, obj Object) error { return b.decoder.DecodeInto(serializer.NewSingleFrameReader(content, ct), obj) } +// ListGroupKinds returns all known GroupKinds by the implementation at that +// time. The set might vary over time as data is created and deleted; and +// should not be treated as an universal "what types could possibly exist", +// but more generally, "what are the GroupKinds of the objects that currently +// exist"? However, obviously, specific implementations might honor this +// guideline differently. This might be used for introspection into the system. +func (b *Generic) ListGroupKinds(ctx context.Context) ([]core.GroupKind, error) { + // If the context has been cancelled or timed out; directly return an error + if err := ctx.Err(); err != nil { + return nil, err + } + + return b.storage.ListGroupKinds(ctx) +} + // ListNamespaces lists the available namespaces for the given GroupKind. 
// This function shall only be called for namespaced objects, it is up to // the caller to make sure they do not call this method for root-spaced diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go index 36ce3f10..52d60b20 100644 --- a/pkg/storage/filesystem/filefinder_simple.go +++ b/pkg/storage/filesystem/filefinder_simple.go @@ -155,6 +155,36 @@ func (f *SimpleFileFinder) ext() (string, error) { return f.resolver.ExtensionForContentType(f.contentTyper.ContentType) } +// ListGroupKinds returns all known GroupKinds by the implementation at that +// time. The set might vary over time as data is created and deleted; and +// should not be treated as an universal "what types could possibly exist", +// but more generally, "what are the GroupKinds of the objects that currently +// exist"? However, obviously, specific implementations might honor this +// guideline differently. This might be used for introspection into the system. +func (f *SimpleFileFinder) ListGroupKinds(ctx context.Context) ([]core.GroupKind, error) { + if f.opts.DisableGroupDirectory { + return nil, fmt.Errorf("cannot resolve GroupKinds when group directories are disabled: %w", core.ErrInvalidParameter) + } + + // List groups at top-level + groups, err := readDir(ctx, f.fs, "") + if err != nil { + return nil, err + } + // For all groups; also list all kinds, and add to the following list + groupKinds := []core.GroupKind{} + for _, group := range groups { + kinds, err := readDir(ctx, f.fs, group) + if err != nil { + return nil, err + } + for _, kind := range kinds { + groupKinds = append(groupKinds, core.GroupKind{Group: group, Kind: kind}) + } + } + return groupKinds, nil +} + // ListNamespaces lists the available namespaces for the given GroupKind. 
// This function shall only be called for namespaced objects, it is up to // the caller to make sure they do not call this method for root-spaced diff --git a/pkg/storage/filesystem/storage.go b/pkg/storage/filesystem/storage.go index ef084f33..57945f9c 100644 --- a/pkg/storage/filesystem/storage.go +++ b/pkg/storage/filesystem/storage.go @@ -129,6 +129,17 @@ func (r *Generic) Delete(ctx context.Context, id core.UnversionedObjectID) error return r.FileFinder().Filesystem().Remove(ctx, p) } +// ListGroupKinds returns all known GroupKinds by the implementation at that +// time. The set might vary over time as data is created and deleted; and +// should not be treated as an universal "what types could possibly exist", +// but more generally, "what are the GroupKinds of the objects that currently +// exist"? However, obviously, specific implementations might honor this +// guideline differently. This might be used for introspection into the system. +func (r *Generic) ListGroupKinds(ctx context.Context) ([]core.GroupKind, error) { + // Just use the underlying filefinder + return r.FileFinder().ListGroupKinds(ctx) +} + // ListNamespaces lists the available namespaces for the given GroupKind. // This function shall only be called for namespaced objects, it is up to // the caller to make sure they do not call this method for root-spaced diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go index a8d45ff1..5b3d9429 100644 --- a/pkg/storage/filesystem/unstructured/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -103,6 +103,21 @@ func (f *GenericFileFinder) ObjectsAt(ctx context.Context, path string) (core.Un return ids.Copy(), nil } +// ListGroupKinds returns all known GroupKinds by the implementation at that +// time. 
The set might vary over time as data is created and deleted; and +// should not be treated as an universal "what types could possibly exist", +// but more generally, "what are the GroupKinds of the objects that currently +// exist"? However, obviously, specific implementations might honor this +// guideline differently. This might be used for introspection into the system. +func (f *GenericFileFinder) ListGroupKinds(ctx context.Context) ([]core.GroupKind, error) { + m := f.versionedCache(ctx).raw() + gks := make([]core.GroupKind, len(m)) + for gk := range m { + gks = append(gks, gk) + } + return gks, nil +} + // ListNamespaces lists the available namespaces for the given GroupKind. // This function shall only be called for namespaced objects, it is up to // the caller to make sure they do not call this method for root-spaced diff --git a/pkg/storage/filesystem/unstructured/mapped_cache.go b/pkg/storage/filesystem/unstructured/mapped_cache.go index 9b1d1351..8b9f1e8a 100644 --- a/pkg/storage/filesystem/unstructured/mapped_cache.go +++ b/pkg/storage/filesystem/unstructured/mapped_cache.go @@ -26,6 +26,8 @@ type objectIDCache interface { type versionRef interface { // looks up the groupKind interface for the given key groupKind(gk core.GroupKind) groupKind + // raw returns the underlying map used; can be used for listing + raw() map[core.GroupKind]groupKind // shorthand to look up the interfaces all the way to the // name interface all at once for the given ID getID(id core.UnversionedObjectID) name @@ -116,6 +118,13 @@ func (b *versionRefImpl) groupKind(gk core.GroupKind) groupKind { return val } +func (b *versionRefImpl) raw() map[core.GroupKind]groupKind { + if b.gkToNamespace == nil { + b.gkToNamespace = make(map[core.GroupKind]groupKind) + } + return b.gkToNamespace +} + func (b *versionRefImpl) getID(id core.UnversionedObjectID) name { return b.groupKind(id.GroupKind()).namespace(id.ObjectKey().Namespace).name(id.ObjectKey().Name) } diff --git 
a/pkg/storage/interfaces.go b/pkg/storage/interfaces.go index a016eff7..3d260603 100644 --- a/pkg/storage/interfaces.go +++ b/pkg/storage/interfaces.go @@ -79,6 +79,14 @@ type Reader interface { } type Lister interface { + // ListGroupKinds returns all known GroupKinds by the implementation at that + // time. The set might vary over time as data is created and deleted; and + // should not be treated as an universal "what types could possibly exist", + // but more generally, "what are the GroupKinds of the objects that currently + // exist"? However, obviously, specific implementations might honor this + // guideline differently. This might be used for introspection into the system. + ListGroupKinds(ctx context.Context) ([]core.GroupKind, error) + // ListNamespaces lists the available namespaces for the given GroupKind. // This function shall only be called for namespaced objects, it is up to // the caller to make sure they do not call this method for root-spaced From 653075a4340da2ba5446063614ea76435a9b3ba5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 18 Feb 2021 01:50:15 +0200 Subject: [PATCH 126/149] Fix small bug --- pkg/serializer/serializer.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index 795590b6..8f8b5fdf 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -208,6 +208,7 @@ func NewSerializer(scheme *runtime.Scheme, codecs *k8sserializer.CodecFactory) S return &serializer{ LockedScheme: schemeLock, + codecs: codecs, converter: NewConverter(schemeLock), defaulter: NewDefaulter(schemeLock), patcher: NewPatcher( From 8230bcca9375bd19fc2bfcceb0bfe528cc673463 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 18 Feb 2021 01:50:55 +0200 Subject: [PATCH 127/149] Fix minor bugs in the txclient, and move one func to utils.go --- pkg/storage/client/transactional/client.go | 14 ++------------ 
pkg/storage/client/transactional/utils.go | 16 +++++++++++++++- 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index aa3db913..4d786547 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -2,8 +2,6 @@ package transactional import ( "context" - "crypto/rand" - "encoding/hex" "fmt" "strings" "sync" @@ -246,6 +244,7 @@ func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) err: err, c: c.c, manager: c.manager, + commitHook: c.CommitHookChain(), ctx: ctxWithDeadline, info: info, cleanupFunc: cleanupFunc, @@ -297,6 +296,7 @@ func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts err: err, c: c.c, manager: c.manager, + commitHook: c.CommitHookChain(), ctx: ctxWithDeadline, info: info, cleanupFunc: cleanupFunc, @@ -304,13 +304,3 @@ func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts merger: c.merger, }, nil } - -// randomSHA returns a hex-encoded string from {byteLen} random bytes. -func randomSHA(byteLen int) (string, error) { - b := make([]byte, byteLen) - _, err := rand.Read(b) - if err != nil { - return "", err - } - return hex.EncodeToString(b), nil -} diff --git a/pkg/storage/client/transactional/utils.go b/pkg/storage/client/transactional/utils.go index 4812266f..4c3f6cfa 100644 --- a/pkg/storage/client/transactional/utils.go +++ b/pkg/storage/client/transactional/utils.go @@ -1,6 +1,10 @@ package transactional -import "context" +import ( + "context" + "crypto/rand" + "encoding/hex" +) // execTransactionsCtx executes the functions in order. Before each // function in the chain is run; the context is checked for errors @@ -19,3 +23,13 @@ func execTransactionsCtx(ctx context.Context, funcs []txFunc) error { } return nil } + +// randomSHA returns a hex-encoded string from {byteLen} random bytes. 
+func randomSHA(byteLen int) (string, error) { + b := make([]byte, byteLen) + _, err := rand.Read(b) + if err != nil { + return "", err + } + return hex.EncodeToString(b), nil +} From 93200924f5e1be652c7c498d84e344f8b11b3073 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 18 Feb 2021 01:51:45 +0200 Subject: [PATCH 128/149] Create a btree high-level implementation that supports indexing and multi-versioning --- .../unstructured/btree/btree_cache.go | 215 ++++++++++++++++++ .../filesystem/unstructured/btree/utils.go | 20 ++ 2 files changed, 235 insertions(+) create mode 100644 pkg/storage/filesystem/unstructured/btree/btree_cache.go create mode 100644 pkg/storage/filesystem/unstructured/btree/utils.go diff --git a/pkg/storage/filesystem/unstructured/btree/btree_cache.go b/pkg/storage/filesystem/unstructured/btree/btree_cache.go new file mode 100644 index 00000000..d5753076 --- /dev/null +++ b/pkg/storage/filesystem/unstructured/btree/btree_cache.go @@ -0,0 +1,215 @@ +package btree + +import ( + "errors" + "fmt" + + "github.com/google/btree" +) + +var ( + ErrVersionRefNotFound = errors.New("version ref tree not found") + ErrVersionRefAlreadyExists = errors.New("version ref tree already exists") +) + +type OriginalBTreeItem = btree.Item + +type ItemIterator func(it Item) bool + +type ItemQuery interface { + btree.Item + fmt.Stringer +} + +type Item interface { + ItemQuery + GetValueItem() ValueItem +} + +type ValueItem interface { + Item + + Key() interface{} + KeyString() string + Value() interface{} + ValueString() string + IndexedPtrs() []Item +} + +type BTreeVersionedIndex interface { + VersionedTree(ref string) (BTreeIndex, bool) + NewVersionedTree(ref, base string) (BTreeIndex, error) + DeleteVersionedTree(ref string) +} + +func NewBTreeVersionedIndex() BTreeVersionedIndex { + return &bTreeVersionedIndexImpl{ + indexes: make(map[string]BTreeIndex), + freelist: btree.NewFreeList(btree.DefaultFreeListSize), + } +} + +type 
bTreeVersionedIndexImpl struct { + indexes map[string]BTreeIndex + freelist *btree.FreeList +} + +func (i *bTreeVersionedIndexImpl) VersionedTree(ref string) (BTreeIndex, bool) { + t, ok := i.indexes[ref] + return t, ok +} + +func (i *bTreeVersionedIndexImpl) NewVersionedTree(ref, base string) (BTreeIndex, error) { + // Make sure ref already doesn't exist + _, ok := i.VersionedTree(ref) + if ok { + return nil, fmt.Errorf("%w: %s", ErrVersionRefAlreadyExists, ref) + } + + var t2 BTreeIndex + if len(base) != 0 { + // Get the base versionref + t, ok := i.VersionedTree(base) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrVersionRefNotFound, base) + } + // Clone the base BTree + t2 = &bTreeIndexImpl{btree: t.Internal().Clone(), parentRef: base} + } else { + // Create a new BTree with the shared freelist + t2 = newBTreeIndex(i.freelist) + } + // Register in the map + i.indexes[ref] = t2 + return t2, nil +} + +func (i *bTreeVersionedIndexImpl) DeleteVersionedTree(ref string) { + t, ok := i.VersionedTree(ref) + if ok { + // Move the nodes of the cow-part of the given BTree to the freelist for re-use + t.Internal().Clear(true) + } + // Just delete the index + delete(i.indexes, ref) +} + +func newBTreeIndex(freelist *btree.FreeList) BTreeIndex { + if freelist == nil { + return &bTreeIndexImpl{btree: btree.New(32)} + } + return &bTreeIndexImpl{btree: btree.NewWithFreeList(32, freelist)} +} + +type BTreeIndex interface { + // Get returns + Get(it ItemQuery) (Item, bool) + // Put inserts or overwrites it (including related indexes) in the underlying tree. + Put(it ValueItem) + // Delete deletes the item. Any related indexes to it are also removed. + Delete(it ItemQuery) + + // List iterates all the items that contain the given prefix, ascending order. + // If submatch is set, iteration splits the prefix subspace up so iteration + // starts from prefix+submatch. 
+	// List returns how many items were processed (also including the possible
+	// "last" one that returned false to stop execution).
+	List(prefix, submatch string, iterator ItemIterator) (n uint32)
+	// Clear clears the btree completely, but re-uses some nodes for better speed
+	Clear()
+
+	Internal() *btree.BTree
+}
+
+type bTreeIndexImpl struct {
+	btree     *btree.BTree
+	parentRef string
+}
+
+func (i *bTreeIndexImpl) Get(it ItemQuery) (Item, bool) {
+	found := i.btree.Get(it)
+	if found != nil {
+		return found.(Item), true
+	}
+	return nil, false
+}
+
+func (i *bTreeIndexImpl) Put(it ValueItem) {
+	// First, delete any previous, now stale, data related to this item
+	i.deleteIndexes(it)
+	// Add the item to the tree
+	i.btree.ReplaceOrInsert(it)
+	// Register all indexes of it, too
+	for _, idxPtr := range it.IndexedPtrs() {
+		i.btree.ReplaceOrInsert(idxPtr)
+	}
+}
+
+func (i *bTreeIndexImpl) List(prefix, submatch string, iterator ItemIterator) uint32 {
+	until := AdvanceLastChar(prefix)
+
+	// start iterating from the "pivot" element (that will not be traversed),
+	// all the way until the prefix isn't there anymore
+	j := uint32(0)
+	i.btree.AscendRange(strItem(prefix+submatch), strItem(until), func(i btree.Item) bool {
+		j += 1
+		return iterator(i.(Item))
+	})
+	return j
+}
+
+func (i *bTreeIndexImpl) Delete(it ItemQuery) {
+	// deleteIndexes returns true if it exists (=> needs to be deleted)
+	if i.deleteIndexes(it) {
+		// Delete the item itself from the tree
+		i.btree.Delete(it)
+	}
+}
+
+// deleteIndexes deletes the indexes associated with it
+// true is returned if the deletions were made, false
+// if the item did not exist
+func (i *bTreeIndexImpl) deleteIndexes(it ItemQuery) bool {
+	// Deliberately look up the full stored item first; only it knows its indexed pointers
+	found, ok := i.Get(it)
+	if !ok {
+		return false // nothing to do
+	}
+
+	// Delete all indexes of it
+	for _, idxPtr := range found.GetValueItem().IndexedPtrs() {
+		i.btree.Delete(idxPtr)
+	}
+	return true
+}
+
+func (i *bTreeIndexImpl) Internal() *btree.BTree {
+	return i.btree
+}
+
+func (i *bTreeIndexImpl) Clear() {
+	i.btree.Clear(true)
+}
+
+func NewIndexedPtr(ptr *ValueItem, str string) Item {
+	return &indexedPtr{ptr, str}
+}
+
+var _ Item = &indexedPtr{}
+
+type indexedPtr struct {
+	ptr *ValueItem
+	str string
+}
+
+func (s *indexedPtr) Less(item btree.Item) bool { return s.String() < item.(Item).String() }
+func (s *indexedPtr) String() string { return s.str + ":" + s.GetValueItem().KeyString() }
+func (s *indexedPtr) GetValueItem() ValueItem { return *s.ptr }
+
+var _ ItemQuery = strItem("")
+
+// strItem is only used for iteration; never actually stored in the B-tree
+type strItem string
+
+func (s strItem) Less(item btree.Item) bool { return s.String() < item.(Item).String() }
+func (s strItem) String() string { return string(s) }
diff --git a/pkg/storage/filesystem/unstructured/btree/utils.go b/pkg/storage/filesystem/unstructured/btree/utils.go
new file mode 100644
index 00000000..a92090a0
--- /dev/null
+++ b/pkg/storage/filesystem/unstructured/btree/utils.go
@@ -0,0 +1,20 @@
+package btree
+
+func GetValueString(index BTreeIndex, it Item) string {
+	it, ok := index.Get(it)
+	if !ok {
+		return ""
+	}
+	valItem := it.GetValueItem()
+	if valItem == nil {
+		return ""
+	}
+	return valItem.ValueString()
+}
+
+// AdvanceLastChar sets the last character to the next available char, e.g. "Hello" -> "Hellp".
+// This can be used when listing as a way to not use an inclusive start parameter.
+func AdvanceLastChar(str string) string {
+	// TODO: if the last char already is 255, this should actually bump the second-last char, etc.
+ return str[:len(str)-1] + string(str[len(str)-1]+1) +} From ca752abc69b39692824c2f0c0b39e890ea88cfc9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 18 Feb 2021 02:13:44 +0200 Subject: [PATCH 129/149] Add a helper function to list unique submatches --- .../filesystem/unstructured/btree/utils.go | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/pkg/storage/filesystem/unstructured/btree/utils.go b/pkg/storage/filesystem/unstructured/btree/utils.go index a92090a0..107d197d 100644 --- a/pkg/storage/filesystem/unstructured/btree/utils.go +++ b/pkg/storage/filesystem/unstructured/btree/utils.go @@ -18,3 +18,42 @@ func AdvanceLastChar(str string) string { // TODO: if the last char already is 255, this should actually bump the second-last char, etc. return str[:len(str)-1] + string(str[len(str)-1]+1) } + +// UniqueIterFunc is used in ListUnique +type UniqueIterFunc func(it ValueItem) (start string, exclusive bool) + +// ListUnique traverses the index in ascending order for each item under prefix. +// However, when an item is matched, the UniqueIterFunc iterator decides where to +// start the search the next time. One possible implementation is to return the +// submatch (i.e. strings.TrimPrefix(it.Key(), prefix)) and set exclusive to true, +// which will make ListUnique skip all other "duplicate" items in the same prefix space. +// +// Example: +// index = {"bar:aa", "foo:aa:bb", "foo:aa:cc", "foo:aa:cc:dd", "foo:bb:cc", "foo:bb:dd", "foo:dd:ee"} +// prefix = "foo:" +// iterator returns exclusive == true, and strings.Split(it.Key)[1], e.g. 
"foo:aa:cc:dd" => "aa" +// Then the following items will be visited: {"foo:aa:bb", "foo:bb:cc", "foo:dd:ee"} +func ListUnique(index BTreeIndex, prefix string, iterator UniqueIterFunc) { + start := "" // indicates what submatch string to start matching from (inclusive as per List() default behavior) + exclusive := false + for { + // Traverse the list of all IDs in the system, but only read one ID at a time, then exit + // the iteration. Next time the iteration is started, "start" is forwarded so the list "jumps" + // all the duplicate items in between. + // The return value for a successful list is 1, but if it is 0 we know we have traversed all items + if index.List(prefix, start, func(it Item) bool { + // next round; start from the returned submatch + start, exclusive = iterator(it.GetValueItem()) + // If exclusive is true, this submatch will not be included in the next List call, as the last + // char is now advanced just slightly + if exclusive { + start = AdvanceLastChar(start) + } + // Always traverse just one object + return false + }) == 0 { + // Break when there are no more items under the prefix + break + } + } +} From fdca597503c7beff4903a278bd2aa1f56a4a32c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 18 Feb 2021 02:14:17 +0200 Subject: [PATCH 130/149] Refactor the unstructured FileFinder to work with the BTree instead --- .../unstructured/filefinder_mapped.go | 331 +++++++++++++----- 1 file changed, 248 insertions(+), 83 deletions(-) diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go index 5b3d9429..bd9c6e0e 100644 --- a/pkg/storage/filesystem/unstructured/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -9,6 +9,7 @@ import ( "github.com/sirupsen/logrus" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" + 
"github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/btree" utilerrs "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" ) @@ -36,7 +37,7 @@ func NewGenericFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.Fi return &GenericFileFinder{ contentTyper: contentTyper, fs: fs, - cache: &objectIDCacheImpl{}, + index: btree.NewBTreeVersionedIndex(), mu: &sync.RWMutex{}, } } @@ -55,8 +56,8 @@ type GenericFileFinder struct { contentTyper filesystem.ContentTyper fs filesystem.Filesystem - cache objectIDCache - // mu guards cache + index btree.BTreeVersionedIndex + // mu guards index mu *sync.RWMutex } @@ -68,8 +69,12 @@ func (f *GenericFileFinder) ContentTyper() filesystem.ContentTyper { return f.contentTyper } -func (f *GenericFileFinder) versionedCache(ctx context.Context) versionRef { - return f.cache.versionRef(core.GetVersionRef(ctx).Branch()) +func (f *GenericFileFinder) versionedIndex(ctx context.Context) (btree.BTreeIndex, error) { + i, ok := f.index.VersionedTree(core.GetVersionRef(ctx).Branch()) + if ok { + return i, nil + } + return nil, fmt.Errorf("no such versionref registered") } // ObjectPath gets the file path relative to the root directory @@ -78,12 +83,19 @@ func (f *GenericFileFinder) ObjectPath(ctx context.Context, id core.UnversionedO f.mu.RLock() defer f.mu.RUnlock() - // Get the path for the given version and ID - p, ok := f.versionedCache(ctx).getID(id).get() + // Get the versioned tree for the context + index, err := f.versionedIndex(ctx) + if err != nil { + return "", err + } + + // Lookup the BTree item for the given ID + p, ok := index.Get(newIDItem(id, "")) if !ok { return "", utilerrs.NewAggregate([]error{ErrNotTracked, core.NewErrNotFound(id)}) } - return p, nil + // Return the path + return p.GetValueItem().ValueString(), nil } // ObjectsAt retrieves the ObjectIDs in the file with the given relative file path. 
@@ -92,15 +104,30 @@ func (f *GenericFileFinder) ObjectsAt(ctx context.Context, path string) (core.Un f.mu.RLock() defer f.mu.RUnlock() - // Get the all the IDs for the given path - ids, ok := f.versionedCache(ctx).getIDs(path) - if !ok { + // Get the versioned tree for the context + index, err := f.versionedIndex(ctx) + if err != nil { + return nil, err + } + idSet := f.objectsAt(index, path) + // Error if there is no such known path + if idSet.Len() == 0 { // TODO: Support "creation" of Objects easier, in a generic way through an interface, e.g. // NewObjectPlacer? return nil, fmt.Errorf("%q: %w", path, ErrNotTracked) } - // Return a deep copy of the set; don't let the caller mess with our internal state - return ids.Copy(), nil + return idSet, nil +} + +func (f *GenericFileFinder) objectsAt(index btree.BTreeIndex, path string) core.UnversionedObjectIDSet { + // Traverse the objects belonging to the given path index + ids := core.NewUnversionedObjectIDSet() + index.List(idPathIndexField+":"+path, "", func(it btree.Item) bool { + // Insert each objectID belonging to that path into the set + ids.Insert(it.GetValueItem().Key().(core.UnversionedObjectID)) + return true + }) + return ids } // ListGroupKinds returns all known GroupKinds by the implementation at that @@ -110,11 +137,26 @@ func (f *GenericFileFinder) ObjectsAt(ctx context.Context, path string) (core.Un // exist"? However, obviously, specific implementations might honor this // guideline differently. This might be used for introspection into the system. 
func (f *GenericFileFinder) ListGroupKinds(ctx context.Context) ([]core.GroupKind, error) { - m := f.versionedCache(ctx).raw() - gks := make([]core.GroupKind, len(m)) - for gk := range m { - gks = append(gks, gk) + // Lock for reading + f.mu.RLock() + defer f.mu.RUnlock() + + // Get the versioned tree for the context + index, err := f.versionedIndex(ctx) + if err != nil { + return nil, err } + + gks := []core.GroupKind{} + // List GroupKinds directly under "id:*" + prefix := idField + ":" + // Extract the GroupKind from the visited item, and return the groupkind exclusively, so it + // won't be visited again + btree.ListUnique(index, prefix, func(it btree.ValueItem) (string, bool) { + gk := it.Key().(core.UnversionedObjectID).GroupKind() + gks = append(gks, gk) + return gk.String(), true + }) return gks, nil } @@ -133,13 +175,22 @@ func (f *GenericFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKin f.mu.RLock() defer f.mu.RUnlock() - // Get the versioned mapping between the groupkind and its namespaces - m := f.versionedCache(ctx).groupKind(gk).raw() - // Add all the namespaces to a stringset and return + // Get the versioned tree for the context + index, err := f.versionedIndex(ctx) + if err != nil { + return nil, err + } + nsSet := sets.NewString() - for ns := range m { + // List namespaces under "id:{groupkind}:*" + prefix := idField + ":" + gk.String() + ":" + // Extract the namespace from the visited item, and return the groupkind exclusively, so it + // won't be visited again + btree.ListUnique(index, prefix, func(it btree.ValueItem) (string, bool) { + ns := it.Key().(core.UnversionedObjectID).ObjectKey().Namespace nsSet.Insert(ns) - } + return ns, true + }) return nsSet, nil } @@ -153,13 +204,19 @@ func (f *GenericFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind f.mu.RLock() defer f.mu.RUnlock() - // Get the versioned mapping between the groupkind & ns, and its registered names - m := 
f.versionedCache(ctx).groupKind(gk).namespace(namespace).raw() - // Create a sized ID set; and insert the IDs one-by-one - ids := core.NewUnversionedObjectIDSetSized(len(m)) - for name := range m { - ids.Insert(core.NewUnversionedObjectID(gk, core.ObjectKey{Name: name, Namespace: namespace})) + // Get the versioned tree for the context + index, err := f.versionedIndex(ctx) + if err != nil { + return nil, err } + + ids := core.NewUnversionedObjectIDSet() + // List ObjectIDs under this "folder" + base := idField + ":" + gk.String() + ":" + namespace + ":" + index.List(base, "", func(it btree.Item) bool { + ids.Insert(it.GetValueItem().Key().(core.UnversionedObjectID)) + return true + }) return ids, nil } @@ -169,8 +226,21 @@ func (f *GenericFileFinder) ChecksumForPath(ctx context.Context, path string) (s f.mu.RLock() defer f.mu.RUnlock() + // Get the versioned tree for the context + index, err := f.versionedIndex(ctx) + if err != nil { + return "", false + } + return f.checksumForPath(index, path) +} + +func (f *GenericFileFinder) checksumForPath(index btree.BTreeIndex, path string) (string, bool) { // Get the checksum for the given path at the given version - return f.versionedCache(ctx).getChecksum(path) + item, ok := index.Get(newChecksumItem(path, "")) + if !ok { + return "", false + } + return item.GetValueItem().Value().(ChecksumPath).Checksum, true } // MoveFile moves an internal mapping from oldPath to newPath. 
moved == true if the oldPath @@ -180,37 +250,36 @@ func (f *GenericFileFinder) MoveFile(ctx context.Context, oldPath, newPath strin f.mu.Lock() defer f.mu.Unlock() - // Get the versioned cache - cache := f.versionedCache(ctx) - - // Get the set of object IDs oldPath points to - idSet, ok := cache.getIDs(oldPath) - if !ok { - logrus.Tracef("MoveFile: oldPath %q did not have any IDs", oldPath) + // Get the versioned tree for the context + index, err := f.versionedIndex(ctx) + if err != nil { + logrus.Debugf("MoveFile %s -> %s: got error from versionedIndex: %v", oldPath, newPath, err) return false } + + // Get all the ObjectIDs assigned to the old path + idSet := f.objectsAt(index, oldPath) logrus.Tracef("MoveFile: idSet: %s", idSet) - // Replace the map header; assign it the new path instead - cache.setIDs(newPath, idSet) - cache.deleteIDs(oldPath) - logrus.Tracef("MoveFile: Moved idSet from %q to %q", oldPath, newPath) + // Re-assign the IDs to the new path + _ = idSet.ForEach(func(id core.UnversionedObjectID) error { + index.Put(newIDItem(id, newPath)) + return nil + }) - // Move the checksum info - checksum, ok := cache.getChecksum(oldPath) + // Move the checksum info over by + // a) getting the checksum for the old path + // b) assigning that checksum to the new path + // c) deleting the item for the old path + checksum, ok := f.checksumForPath(index, oldPath) if !ok { logrus.Error("MoveFile: Expected checksum to be available, but wasn't") // if this happens; newPath won't be mapped to any checksum; but nothing worse } - cache.setChecksum(newPath, checksum) - cache.setChecksum(oldPath, "") + index.Put(newChecksumItem(newPath, checksum)) + index.Delete(newChecksumItem(newPath, checksum)) logrus.Tracef("MoveFile: Moved checksum from %q to %q", oldPath, newPath) - // Move the leveled-references of all IDs from the old to the new path - _ = idSet.ForEach(func(id core.UnversionedObjectID) error { - cache.getID(id).set(newPath) - return nil - }) return true } @@ 
-235,20 +304,31 @@ func (f *GenericFileFinder) SetMapping(ctx context.Context, state ChecksumPath, f.mu.Lock() defer f.mu.Unlock() - return f.setIDsAtPath(f.versionedCache(ctx), state.Path, state.Checksum, newIDs) + // Get the versioned tree for the context + index, err := f.versionedIndex(ctx) + if err != nil { + // Always return an empty set, although the version ref does not exist + added = core.NewUnversionedObjectIDSet() + duplicates = core.NewUnversionedObjectIDSet() + removed = core.NewUnversionedObjectIDSet() + return + } + + return f.setIDsAtPath(index, state.Path, state.Checksum, newIDs) } // internal method; not using any mutex; caller's responsibility -func (f *GenericFileFinder) setIDsAtPath(cache versionRef, path, checksum string, newIDs core.UnversionedObjectIDSet) (added, duplicates, removed core.UnversionedObjectIDSet) { - // Enforce an empty checksum for an empty newIDs +func (f *GenericFileFinder) setIDsAtPath(index btree.BTreeIndex, path, checksum string, newIDs core.UnversionedObjectIDSet) (added, duplicates, removed core.UnversionedObjectIDSet) { + // If there are no new ids, delete the checksum mapping if newIDs.Len() == 0 { - checksum = "" + index.Delete(newChecksumItem(path, "")) + } else { + // Update the checksum. + index.Put(newChecksumItem(path, checksum)) } - // Update the checksum. 
If len(checksum) == 0 this will delete the mapping - cache.setChecksum(path, checksum) // Get the old IDs; and compute the different "buckets" - oldIDs, _ := cache.getIDs(path) + oldIDs := f.objectsAt(index, path) logrus.Tracef("setIDsAtPath: oldIDs: %s", oldIDs) // Get newID entries that are not present in oldIDs added = newIDs.Difference(oldIDs) @@ -260,37 +340,30 @@ func (f *GenericFileFinder) setIDsAtPath(cache versionRef, path, checksum string removed = oldIDs.Difference(newIDs) logrus.Tracef("setIDsAtPath: removed: %s", removed) - // Register the added items in the layered cache + // Register the added items _ = added.ForEach(func(addedID core.UnversionedObjectID) error { - n := cache.getID(addedID) - // Check if this name already exists somewhere else - otherPath, ok := n.get() - if ok && otherPath != path { + itemToAdd := newIDItem(addedID, path) + // Check if this ID already exists in some other file. TODO: Is the second check needed? + if otherFile := btree.GetValueString(index, itemToAdd); len(otherFile) != 0 && otherFile != path { // If so; it is a duplicate; move it to duplicates added.Delete(addedID) duplicates.Insert(addedID) return nil } // If it didn't exist somewhere else, add the mapping between this ID and path - n.set(path) + index.Put(itemToAdd) return nil }) logrus.Tracef("setIDsAtPath: added post-filter: %s", added) logrus.Tracef("setIDsAtPath: duplicates post-filter: %s", duplicates) - // Remove the removed items from the layered cache + // Remove the removed items _ = removed.ForEach(func(removedID core.UnversionedObjectID) error { - cache.getID(removedID).delete() + index.Delete(newIDItem(removedID, "")) return nil }) - // Finally, update the map from path to a set of IDs. - // Do not include the duplicates. We MUST NOT mutate the calling parameter. 
- finalIDs := newIDs.Copy().DeleteSet(duplicates) - logrus.Tracef("setIDsAtPath: finalIDs: %s", finalIDs) - cache.setIDs(path, finalIDs) - // return the different buckets return added, duplicates, removed } @@ -301,9 +374,17 @@ func (f *GenericFileFinder) DeleteMapping(ctx context.Context, path string) (rem f.mu.Lock() defer f.mu.Unlock() + // Get the versioned tree for the context + index, err := f.versionedIndex(ctx) + if err != nil { + // Always return an empty set, although the version ref does not exist + removed = core.NewUnversionedObjectIDSet() + return + } + // Re-use the setMappings internal function _, _, removed = f.setIDsAtPath( - f.versionedCache(ctx), // Get the versioned cache + index, // Get the versioned index path, // Delete mappings at this path "", // No checksum -> delete that mapping core.NewUnversionedObjectIDSet(), // Empty "desired state" -> everything removed @@ -317,13 +398,19 @@ func (f *GenericFileFinder) ResetMappings(ctx context.Context, m map[ChecksumPat f.mu.Lock() defer f.mu.Unlock() + // Keep track of all duplicates there are in the mappings. + // Always return an empty set, although the version ref does not exist + duplicates = core.NewUnversionedObjectIDSet() + + // Get the versioned tree for the context + index, err := f.versionedIndex(ctx) + if err != nil { + return + } // Completely clean up all existing data on the branch before starting. - cache := f.cache.cleanVersionRef(core.GetVersionRef(ctx).Branch()) + index.Clear() logrus.Trace("ResetMappings: cleaned branch") - // Keep track of all duplicates there are in the mappings - duplicates = core.NewUnversionedObjectIDSet() - // Go through all files and add them to the cache for cp, allIDs := range m { // The first "duplicate" entry will succeed in "making it" to the cache; but all the others @@ -334,7 +421,7 @@ func (f *GenericFileFinder) ResetMappings(ctx context.Context, m map[ChecksumPat // Re-use the internal setMappings function again. 
// We don't need added & removed here, as we know that {allIDs} = {added} + {newDuplicates} // Removals is always empty as we cleaned all mappings before we started this method. - _, newDuplicates, _ := f.setIDsAtPath(cache, cp.Path, cp.Checksum, allIDs) + _, newDuplicates, _ := f.setIDsAtPath(index, cp.Path, cp.Checksum, allIDs) logrus.Tracef("ResetMappings: newDuplicates: %s", newDuplicates) // Add all duplicates together so we can process them later duplicates.InsertSet(newDuplicates) @@ -346,15 +433,93 @@ func (f *GenericFileFinder) ResetMappings(ctx context.Context, m map[ChecksumPat // In the resulting mappings; no duplicates are allowed (to avoid "races" at random // between different duplicates otherwise) _ = duplicates.ForEach(func(id core.UnversionedObjectID) error { - // Get the ID mapping so we get to know the underlying path - n := cache.getID(id) - duplicatePath, _ := n.get() - // Delete the ID mapping for that path - n.delete() - // Delete the ID also from the other map - cache.rawIDs()[duplicatePath].Delete(id) + index.Delete(newIDItem(id, "")) return nil }) return } + +// RegisterVersionRef registers a new "head" version ref, based (using copy-on-write logic), +// on the existing versionref "base". head must be non-nil, but base can be nil, if it is +// desired that "head" has no parent, and hence, is blank. An error is returned if head is +// nil, or base does not exist. +func (f *GenericFileFinder) RegisterVersionRef(head, base core.VersionRef) error { + if head == nil { + return fmt.Errorf("head must not be nil") + } + baseBranch := "" + if base != nil { + baseBranch = base.Branch() + } + _, err := f.index.NewVersionedTree(head.Branch(), baseBranch) + return err +} + +// HasVersionRef returns true if the given head version ref has been registered. +func (f *GenericFileFinder) HasVersionRef(head core.VersionRef) bool { + _, ok := f.index.VersionedTree(head.Branch()) + return ok +} + +// DeleteVersionRef deletes the given head version ref. 
+func (f *GenericFileFinder) DeleteVersionRef(head core.VersionRef) { + f.index.DeleteVersionedTree(head.Branch()) +} + +func newIDItem(id core.UnversionedObjectID, path string) btree.ValueItem { + return &idItemImpl{id: id, path: path} +} + +type idItemImpl struct { + id core.UnversionedObjectID + path string +} + +const ( + idField = "id" + idPathIndexField = "path" + + checksumField = "chk" +) + +func (i *idItemImpl) Less(item btree.OriginalBTreeItem) bool { + return i.String() < item.(btree.Item).String() +} +func (i *idItemImpl) String() string { return idField + ":" + i.KeyString() } +func (i *idItemImpl) GetValueItem() btree.ValueItem { return i } +func (i *idItemImpl) GetUnversionedObjectID() core.UnversionedObjectID { return i.id } +func (i *idItemImpl) Value() interface{} { return i.path } +func (i *idItemImpl) ValueString() string { return i.path } + +func (i *idItemImpl) Key() interface{} { return i.id } +func (i *idItemImpl) KeyString() string { + // TODO: Cache this and the output of IndexedPtrs()? 
+ return i.id.GroupKind().String() + ":" + i.id.ObjectKey().Namespace + ":" + i.id.ObjectKey().Name +} + +func (i *idItemImpl) IndexedPtrs() []btree.Item { + var self btree.ValueItem = i + return []btree.Item{ + btree.NewIndexedPtr(&self, idPathIndexField+":"+i.path), + } +} + +func newChecksumItem(path, checksum string) btree.ValueItem { + return &checksumItemImpl{ChecksumPath{Path: path, Checksum: checksum}} +} + +type checksumItemImpl struct { + ChecksumPath +} + +func (i *checksumItemImpl) Less(item btree.OriginalBTreeItem) bool { + return i.String() < item.(btree.Item).String() +} +func (i *checksumItemImpl) String() string { return checksumField + ":" + i.KeyString() } +func (i *checksumItemImpl) GetValueItem() btree.ValueItem { return i } +func (i *checksumItemImpl) Key() interface{} { return i.Path } +func (i *checksumItemImpl) KeyString() string { return i.Path } +func (i *checksumItemImpl) Value() interface{} { return i.ChecksumPath } +func (i *checksumItemImpl) ValueString() string { return i.Checksum } +func (i *checksumItemImpl) IndexedPtrs() []btree.Item { return nil } From c33cbd5f702e0b5fb1f6f58b95a4e5abfbc77e12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 18 Feb 2021 02:16:12 +0200 Subject: [PATCH 131/149] Add new versionref-related functions to the unstructured FileFinder, and automatically use it when syncing the unstructured Storage. --- .../filesystem/unstructured/interfaces.go | 10 ++++++++++ .../filesystem/unstructured/storage.go | 19 +++++++++++++++---- 2 files changed, 25 insertions(+), 4 deletions(-) diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go index 6b09e548..7d0b64bf 100644 --- a/pkg/storage/filesystem/unstructured/interfaces.go +++ b/pkg/storage/filesystem/unstructured/interfaces.go @@ -79,6 +79,16 @@ type FileFinder interface { // MoveFile moves an internal mapping from oldPath to newPath. 
moved == true if the oldPath // existed and hence the move was performed. MoveFile(ctx context.Context, oldPath, newPath string) (moved bool) + + // RegisterVersionRef registers a new "head" version ref, based (using copy-on-write logic), + // on the existing versionref "base". head must be non-nil, but base can be nil, if it is + // desired that "head" has no parent, and hence, is blank. An error is returned if head is + // nil, or base does not exist. + RegisterVersionRef(head, base core.VersionRef) error + // HasVersionRef returns true if the given head version ref has been registered. + HasVersionRef(head core.VersionRef) bool + // DeleteVersionRef deletes the given head version ref. + DeleteVersionRef(head core.VersionRef) } // ChecksumPath is a tuple of a given Checksum and relative file Path, diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go index c0b20702..c61cc94a 100644 --- a/pkg/storage/filesystem/unstructured/storage.go +++ b/pkg/storage/filesystem/unstructured/storage.go @@ -60,15 +60,28 @@ func (s *Generic) Sync(ctx context.Context) (successful, duplicates core.Unversi fs := fileFinder.Filesystem() contentTyper := fileFinder.ContentTyper() + // Create pre-made empty sets for the "successful" and "duplicate" IDs + // This ensures that although errors occur, we don't return a nil set + successful = core.NewUnversionedObjectIDSet() + duplicates = core.NewUnversionedObjectIDSet() + + ref := core.GetVersionRef(ctx) + if !fileFinder.HasVersionRef(ref) { + if err = fileFinder.RegisterVersionRef(ref, nil); err != nil { + return + } + } + // List all valid files in the fs - files, err := filesystem.ListValidFilesInFilesystem( + var files []string + files, err = filesystem.ListValidFilesInFilesystem( ctx, fs, contentTyper, s.PathExcluder(), ) if err != nil { - return nil, nil, err + return } // Walk all files and fill the mappings of the unstructured.FileFinder. 
@@ -96,8 +109,6 @@ func (s *Generic) Sync(ctx context.Context) (successful, duplicates core.Unversi // mappings are now the "truth" about what's on disk // Duplicate mappings are returned from ResetMappings duplicates = fileFinder.ResetMappings(ctx, allMappings) - // Create an empty set for the "successful" IDs - successful = core.NewUnversionedObjectIDSet() // For each set of IDs; add them to the "successful" batch for _, set := range allMappings { successful.InsertSet(set) From 0d4cfd6ee6dbabdc650476dc76c73487bf92cf94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 18 Feb 2021 02:16:23 +0200 Subject: [PATCH 132/149] Remove the old mapped cache --- .../filesystem/unstructured/mapped_cache.go | 254 ------------------ 1 file changed, 254 deletions(-) delete mode 100644 pkg/storage/filesystem/unstructured/mapped_cache.go diff --git a/pkg/storage/filesystem/unstructured/mapped_cache.go b/pkg/storage/filesystem/unstructured/mapped_cache.go deleted file mode 100644 index 8b9f1e8a..00000000 --- a/pkg/storage/filesystem/unstructured/mapped_cache.go +++ /dev/null @@ -1,254 +0,0 @@ -package unstructured - -import ( - "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/storage/core" -) - -// This file contains a set of private interfaces and implementations -// that allows caching mappings between a core.UnversionedObjectID -// and paths & checksums. - -// The point of having these interfaces in front the tree of maps is to -// lazy-initialize the maps only when needed, but without having to -// write if-then clauses all over the code. - -// NOTE: There are no mutexes in these interfaces, it is up to the caller -// to guard these for reading and writing. 
- -type objectIDCache interface { - // looks up the versionRef interface for the given key - versionRef(ref string) versionRef - // cleans all existing data on the versionRef, and returns a new, empty one - cleanVersionRef(ref string) versionRef -} - -type versionRef interface { - // looks up the groupKind interface for the given key - groupKind(gk core.GroupKind) groupKind - // raw returns the underlying map used; can be used for listing - raw() map[core.GroupKind]groupKind - // shorthand to look up the interfaces all the way to the - // name interface all at once for the given ID - getID(id core.UnversionedObjectID) name - - // used to find all the IDs cached at a certain path - getIDs(path string) (core.UnversionedObjectIDSet, bool) - // used to overwrite the ID cache for a certain path - // If ids.Len() == 0; this is effectively a deleteIDs(path) - setIDs(path string, ids core.UnversionedObjectIDSet) - // deletes the ID cache for a certain path - deleteIDs(path string) - // returns the underlying path -> ID map for custom operations - rawIDs() map[string]core.UnversionedObjectIDSet - - // gets the checksum for the given path - getChecksum(path string) (string, bool) - // sets the checksum for the given path - // if len(checksum) == 0; this is deletes the checksum path key - setChecksum(path, checksum string) -} - -type groupKind interface { - // looks up the namespace interface for the given key - namespace(string) namespace - // raw returns the underlying map used; can be used for listing - raw() map[string]namespace -} - -type namespace interface { - // looks up the name interface for the given key - name(name string) name - // raw returns the underlying map used; can be used for listing - raw() map[string]string -} - -type name interface { - // gets the path for the given ID (given while traversing here) - get() (string, bool) - // sets the path for the given ID (given while traversing here) - set(path string) - // deletes the given ID's mapping to a path - 
delete() -} - -type objectIDCacheImpl struct { - versionRefs map[string]versionRef -} - -func (c *objectIDCacheImpl) versionRef(b string) versionRef { - if c.versionRefs == nil { - c.versionRefs = make(map[string]versionRef) - } - val, ok := c.versionRefs[b] - if !ok { - val = &versionRefImpl{} - c.versionRefs[b] = val - } - return val -} - -func (c *objectIDCacheImpl) cleanVersionRef(b string) versionRef { - if c.versionRefs == nil { - c.versionRefs = make(map[string]versionRef) - } - delete(c.versionRefs, b) - c.versionRefs[b] = &versionRefImpl{} - return c.versionRefs[b] -} - -type versionRefImpl struct { - // gkToNamespace maps the objectID hierarchy to a path - gkToNamespace map[core.GroupKind]groupKind - // pathToIDs maps a path to a set of IDs in that file - pathToIDs map[string]core.UnversionedObjectIDSet - // pathChecksums maps a path to a checksum - pathChecksums map[string]string -} - -func (b *versionRefImpl) groupKind(gk core.GroupKind) groupKind { - if b.gkToNamespace == nil { - b.gkToNamespace = make(map[core.GroupKind]groupKind) - } - val, ok := b.gkToNamespace[gk] - if !ok { - val = &groupKindImpl{} - b.gkToNamespace[gk] = val - } - return val -} - -func (b *versionRefImpl) raw() map[core.GroupKind]groupKind { - if b.gkToNamespace == nil { - b.gkToNamespace = make(map[core.GroupKind]groupKind) - } - return b.gkToNamespace -} - -func (b *versionRefImpl) getID(id core.UnversionedObjectID) name { - return b.groupKind(id.GroupKind()).namespace(id.ObjectKey().Namespace).name(id.ObjectKey().Name) -} - -func (b *versionRefImpl) getIDs(path string) (core.UnversionedObjectIDSet, bool) { - if b.pathToIDs == nil { - b.pathToIDs = make(map[string]core.UnversionedObjectIDSet) - } - val, ok := b.pathToIDs[path] - if !ok { - // always return a non-nil set - val = core.NewUnversionedObjectIDSet() - } - return val, ok -} - -func (b *versionRefImpl) setIDs(path string, ids core.UnversionedObjectIDSet) { - if b.pathToIDs == nil { - b.pathToIDs = 
make(map[string]core.UnversionedObjectIDSet) - } - // Delete if empty, otherwise set. - if ids.Len() == 0 { - logrus.Tracef("setIDs: Deleting pathToIDs[%s]", path) - delete(b.pathToIDs, path) - } else { - logrus.Tracef("setIDs: Setting pathToIDs[%s] = %s", path, ids) - b.pathToIDs[path] = ids - } -} - -func (b *versionRefImpl) rawIDs() map[string]core.UnversionedObjectIDSet { - if b.pathToIDs == nil { - b.pathToIDs = make(map[string]core.UnversionedObjectIDSet) - } - return b.pathToIDs -} - -func (b *versionRefImpl) deleteIDs(path string) { - if b.pathToIDs == nil { - b.pathToIDs = make(map[string]core.UnversionedObjectIDSet) - } - logrus.Tracef("deleteIDs: Deleting pathToIDs[%s]", path) - delete(b.pathToIDs, path) -} - -func (b *versionRefImpl) getChecksum(path string) (string, bool) { - if b.pathChecksums == nil { - b.pathChecksums = make(map[string]string) - } - val, ok := b.pathChecksums[path] - return val, ok -} - -func (b *versionRefImpl) setChecksum(path, checksum string) { - if b.pathChecksums == nil { - b.pathChecksums = make(map[string]string) - } - // Delete if empty, otherwise set. 
- if len(checksum) == 0 { - logrus.Tracef("setChecksum: Deleting pathChecksums[%s]", path) - delete(b.pathChecksums, path) - } else { - logrus.Tracef("setChecksum: Setting pathChecksums[%s] = %s", path, checksum) - b.pathChecksums[path] = checksum - } -} - -type groupKindImpl struct { - m map[string]namespace -} - -func (g *groupKindImpl) namespace(ns string) namespace { - if g.m == nil { - g.m = make(map[string]namespace) - } - val, ok := g.m[ns] - if !ok { - val = &namespaceImpl{} - g.m[ns] = val - } - return val -} - -func (g *groupKindImpl) raw() map[string]namespace { - if g.m == nil { - g.m = make(map[string]namespace) - } - return g.m -} - -type namespaceImpl struct { - m map[string]string -} - -func (n *namespaceImpl) name(name string) name { - if n.m == nil { - n.m = make(map[string]string) - } - return &nameImpl{&n.m, name} -} - -func (n *namespaceImpl) raw() map[string]string { - if n.m == nil { - n.m = make(map[string]string) - } - return n.m -} - -type nameImpl struct { - parentM *map[string]string - name string -} - -func (n *nameImpl) get() (string, bool) { - path, ok := (*n.parentM)[n.name] - return path, ok -} - -func (n *nameImpl) set(path string) { - logrus.Tracef("name.set: Setting namespace.m[%s] = %s", n.name, path) - (*n.parentM)[n.name] = path -} - -func (n *nameImpl) delete() { - logrus.Tracef("name.delete: Deleting namespace.m[%s]", n.name) - delete((*n.parentM), n.name) -} From 0babdfdd894c3f5bc7492bdb574da7daa5dcfc81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 18 Feb 2021 02:17:05 +0200 Subject: [PATCH 133/149] Create a hook for automatically doing a copy-on-write for a new branch transaction --- cmd/sample-gitops/main.go | 16 ++++--- pkg/storage/filesystem/unstructured/tx/tx.go | 48 ++++++++++++++++++++ 2 files changed, 58 insertions(+), 6 deletions(-) create mode 100644 pkg/storage/filesystem/unstructured/tx/tx.go diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go index 
4bc80367..1657c906 100644 --- a/cmd/sample-gitops/main.go +++ b/cmd/sample-gitops/main.go @@ -33,6 +33,7 @@ import ( "github.com/weaveworks/libgitops/pkg/storage/filesystem" unstructuredfs "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured" unstructuredevent "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/event" + unstructuredtx "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/tx" "github.com/weaveworks/libgitops/pkg/storage/kube" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) @@ -145,6 +146,8 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str return err } + ctx = core.WithVersionRef(ctx, core.NewBranchRef(localClone.MainBranch())) + // Just use default encoders and decoders encoder := scheme.Serializer.Encoder() decoder := scheme.Serializer.Decoder() @@ -192,6 +195,10 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str return err } + // Register a tx hook so that a new copy-on-write overlay is created when transactions are made + versionRefHook := unstructuredtx.NewUnstructuredStorageTxHandler(gitClient) + txClient.TransactionHookChain().Register(versionRefHook) + // Create a new CommitHook for sending PRs prCommitHook, err := githubpr.NewGitHubPRCommitHandler(ghClient, localClone.RepositoryRef()) if err != nil { @@ -242,17 +249,14 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str // at .Get-time below. car := v1alpha1.Car{} carKey := core.ObjectKey{Name: name} - // Specify what our "base" branch is in the context; make it match the main branch - // of the Git clone. - branchCtx := core.WithVersionRef(ctx, core.NewBranchRef(localClone.MainBranch())) // Our head branch is the name of the Car, and it ends in a "-", which makes the // TxClient add a random sha suffix. headBranch := fmt.Sprintf("%s-update-", name) err := txClient. - BranchTransaction(branchCtx, headBranch). 
// Start a transaction of the base branch to the head - Get(carKey, &car). // Load the latest data of the Car into &car. - Custom(func(ctx context.Context) error { // Mutate (update) status of the Car + BranchTransaction(ctx, headBranch). // Start a transaction of the base branch to the head + Get(carKey, &car). // Load the latest data of the Car into &car. + Custom(func(ctx context.Context) error { // Mutate (update) status of the Car car.Status.Distance = rand.Uint64() car.Status.Speed = rand.Float64() * 100 return nil diff --git a/pkg/storage/filesystem/unstructured/tx/tx.go b/pkg/storage/filesystem/unstructured/tx/tx.go new file mode 100644 index 00000000..567d1f14 --- /dev/null +++ b/pkg/storage/filesystem/unstructured/tx/tx.go @@ -0,0 +1,48 @@ +package unstructuredtx + +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/storage/client" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional" + "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" + "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured" +) + +// NewUnstructuredStorageTxHandler returns a TransactionHook that before the transaction starts +// informs the unstructured.FileFinder (if used) that the new head branch should be created (if +// not already exists) using the base branch as the cow baseline. 
+func NewUnstructuredStorageTxHandler(c client.Client) transactional.TransactionHook { + fsStorage, ok := c.BackendReader().Storage().(filesystem.Storage) + if !ok { + return nil + } + fileFinder, ok := fsStorage.FileFinder().(unstructured.FileFinder) + if !ok { + return nil + } + return &unstructuredStorageTxHandler{fileFinder} +} + +type unstructuredStorageTxHandler struct { + fileFinder unstructured.FileFinder +} + +func (h *unstructuredStorageTxHandler) PreTransactionHook(ctx context.Context, info transactional.TxInfo) error { + head := core.NewBranchRef(info.Head) + if h.fileFinder.HasVersionRef(head) { + return nil // head exists, no-op + } + base := core.NewBranchRef(info.Base) + // If both head and base are the same, and we know that head does not exist in the system, we need to create + // head "from scratch" as a "root version" + if info.Head == info.Base { + base = nil + } + return h.fileFinder.RegisterVersionRef(head, base) +} + +func (h *unstructuredStorageTxHandler) PostTransactionHook(ctx context.Context, info transactional.TxInfo) error { + return nil // cleanup? 
+} From 7df21b46ebae50c4d9af672615f896731b0aecbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 18 Feb 2021 02:17:31 +0200 Subject: [PATCH 134/149] go mod tidy --- go.mod | 2 +- go.sum | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 499f4821..3f4629c7 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/fluxcd/pkg/ssh v0.0.5 github.com/go-git/go-git/v5 v5.2.0 github.com/go-openapi/spec v0.20.0 + github.com/google/btree v1.0.0 github.com/google/go-github/v32 v32.1.0 github.com/labstack/echo v3.3.10+incompatible github.com/labstack/gommon v0.3.0 // indirect @@ -21,7 +22,6 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.6.1 golang.org/x/sys v0.0.0-20210108172913-0df2131ae363 - k8s.io/api v0.19.2 k8s.io/apimachinery v0.19.6 k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 k8s.io/utils v0.0.0-20200912215256-4140de9c8800 diff --git a/go.sum b/go.sum index b4012697..e8f2095e 100644 --- a/go.sum +++ b/go.sum @@ -244,6 +244,7 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= From 5785701bb49c74b0c77ecf8ab5e4381dc3399db5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 24 Feb 2021 23:17:10 +0200 Subject: [PATCH 135/149] Document the btree index from the ground up, and implement 
Find/List in a good manner, instead of the prior hacks. Add a generic string-string ValueItem, and expose the queries publicly. --- .../unstructured/btree/btree_cache.go | 215 ----------- .../unstructured/btree/btree_index.go | 333 ++++++++++++++++++ .../btree/btree_versioned_index.go | 78 ++++ 3 files changed, 411 insertions(+), 215 deletions(-) delete mode 100644 pkg/storage/filesystem/unstructured/btree/btree_cache.go create mode 100644 pkg/storage/filesystem/unstructured/btree/btree_index.go create mode 100644 pkg/storage/filesystem/unstructured/btree/btree_versioned_index.go diff --git a/pkg/storage/filesystem/unstructured/btree/btree_cache.go b/pkg/storage/filesystem/unstructured/btree/btree_cache.go deleted file mode 100644 index d5753076..00000000 --- a/pkg/storage/filesystem/unstructured/btree/btree_cache.go +++ /dev/null @@ -1,215 +0,0 @@ -package btree - -import ( - "errors" - "fmt" - - "github.com/google/btree" -) - -var ( - ErrVersionRefNotFound = errors.New("version ref tree not found") - ErrVersionRefAlreadyExists = errors.New("version ref tree already exists") -) - -type OriginalBTreeItem = btree.Item - -type ItemIterator func(it Item) bool - -type ItemQuery interface { - btree.Item - fmt.Stringer -} - -type Item interface { - ItemQuery - GetValueItem() ValueItem -} - -type ValueItem interface { - Item - - Key() interface{} - KeyString() string - Value() interface{} - ValueString() string - IndexedPtrs() []Item -} - -type BTreeVersionedIndex interface { - VersionedTree(ref string) (BTreeIndex, bool) - NewVersionedTree(ref, base string) (BTreeIndex, error) - DeleteVersionedTree(ref string) -} - -func NewBTreeVersionedIndex() BTreeVersionedIndex { - return &bTreeVersionedIndexImpl{ - indexes: make(map[string]BTreeIndex), - freelist: btree.NewFreeList(btree.DefaultFreeListSize), - } -} - -type bTreeVersionedIndexImpl struct { - indexes map[string]BTreeIndex - freelist *btree.FreeList -} - -func (i *bTreeVersionedIndexImpl) VersionedTree(ref string) 
(BTreeIndex, bool) { - t, ok := i.indexes[ref] - return t, ok -} - -func (i *bTreeVersionedIndexImpl) NewVersionedTree(ref, base string) (BTreeIndex, error) { - // Make sure ref already doesn't exist - _, ok := i.VersionedTree(ref) - if ok { - return nil, fmt.Errorf("%w: %s", ErrVersionRefAlreadyExists, ref) - } - - var t2 BTreeIndex - if len(base) != 0 { - // Get the base versionref - t, ok := i.VersionedTree(base) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrVersionRefNotFound, base) - } - // Clone the base BTree - t2 = &bTreeIndexImpl{btree: t.Internal().Clone(), parentRef: base} - } else { - // Create a new BTree with the shared freelist - t2 = newBTreeIndex(i.freelist) - } - // Register in the map - i.indexes[ref] = t2 - return t2, nil -} - -func (i *bTreeVersionedIndexImpl) DeleteVersionedTree(ref string) { - t, ok := i.VersionedTree(ref) - if ok { - // Move the nodes of the cow-part of the given BTree to the freelist for re-use - t.Internal().Clear(true) - } - // Just delete the index - delete(i.indexes, ref) -} - -func newBTreeIndex(freelist *btree.FreeList) BTreeIndex { - if freelist == nil { - return &bTreeIndexImpl{btree: btree.New(32)} - } - return &bTreeIndexImpl{btree: btree.NewWithFreeList(32, freelist)} -} - -type BTreeIndex interface { - // Get returns - Get(it ItemQuery) (Item, bool) - // Put inserts or overwrites it (including related indexes) in the underlying tree. - Put(it ValueItem) - // Delete deletes the item. Any related indexes to it are also removed. - Delete(it ItemQuery) - - // List iterates all the items that contain the given prefix, ascending order. - // If submatch is set, iteration splits the prefix subspace up so iteration - // starts from prefix+submatch. - // List returns how many items were processed (also including the possible - // "last" one that returned false to stop execution). 
- List(prefix, submatch string, iterator ItemIterator) (n uint32) - // Clear clears the btree completely, but re-uses some nodes for better speed - Clear() - - Internal() *btree.BTree -} - -type bTreeIndexImpl struct { - btree *btree.BTree - parentRef string -} - -func (i *bTreeIndexImpl) Get(it ItemQuery) (Item, bool) { - found := i.btree.Get(it) - if found != nil { - return found.(Item), true - } - return nil, false -} - -func (i *bTreeIndexImpl) Put(it ValueItem) { - // First, delete any previous, now stale, data related to this item - i.deleteIndexes(it) - // Add the item to the tree - i.btree.ReplaceOrInsert(it) - // Register all indexes of it, too - for _, idxPtr := range it.IndexedPtrs() { - i.btree.ReplaceOrInsert(idxPtr) - } -} - -func (i *bTreeIndexImpl) List(prefix, submatch string, iterator ItemIterator) uint32 { - until := AdvanceLastChar(prefix) - - // start iterating from the "pivot" element (that will not be traversed), - // all the way until the prefix isn't there anymore - j := uint32(0) - i.btree.AscendRange(strItem(prefix+submatch), strItem(until), func(i btree.Item) bool { - j += 1 - return iterator(i.(Item)) - }) - return j -} - -func (i *bTreeIndexImpl) Delete(it ItemQuery) { - // deleteIndexes returns true if it exists (=> needs to be deleted) - if i.deleteIndexes(it) { - // Delete the item itself from the tree - i.btree.Delete(it) - } -} - -// deleteIndexes deletes the indexes associated with it -// true is returned if the deletions were made, false -// if the item did not exist -func (i *bTreeIndexImpl) deleteIndexes(it ItemQuery) bool { - // Deliberately - found, ok := i.Get(it) - if !ok { - return false // nothing to do - } - - // Delete all indexes of it - for _, idxPtr := range found.GetValueItem().IndexedPtrs() { - i.btree.Delete(idxPtr) - } - return true -} - -func (i *bTreeIndexImpl) Internal() *btree.BTree { - return i.btree -} - -func (i *bTreeIndexImpl) Clear() { - i.btree.Clear(true) -} - -func NewIndexedPtr(ptr *ValueItem, str 
string) Item { - return &indexedPtr{ptr, str} -} - -var _ Item = &indexedPtr{} - -type indexedPtr struct { - ptr *ValueItem - str string -} - -func (s *indexedPtr) Less(item btree.Item) bool { return s.String() < item.(Item).String() } -func (s *indexedPtr) String() string { return s.str + ":" + s.GetValueItem().KeyString() } -func (s *indexedPtr) GetValueItem() ValueItem { return *s.ptr } - -var _ ItemQuery = strItem("") - -// strItem is only used for iteration; never actually stored in the B-tree -type strItem string - -func (s strItem) Less(item btree.Item) bool { return s.String() < item.(Item).String() } -func (s strItem) String() string { return string(s) } diff --git a/pkg/storage/filesystem/unstructured/btree/btree_index.go b/pkg/storage/filesystem/unstructured/btree/btree_index.go new file mode 100644 index 00000000..15e5b54f --- /dev/null +++ b/pkg/storage/filesystem/unstructured/btree/btree_index.go @@ -0,0 +1,333 @@ +package btree + +import ( + "strings" + + "github.com/google/btree" +) + +// AbstractItem is the abstract btree.Item, the ultimate base type for the B-Tree's ordering. +type AbstractItem = btree.Item + +// ItemString extends the abstract btree.Item with the "opinion" that all Items in this +// B-Tree have a string representation that is operated on as the B-Tree key. +// It should obey the following logic in ItemString.Less(than): +// - If than is an other ItemString, just perform a "return me < than" +// - If than is an ItemQuery, let the ItemQuery decide the ordering by calling than.QueryGTE(me) +type ItemString interface { + AbstractItem + // String returns the string representation of the given item, this serves as the B-Tree key + String() string +} + +// ItemQuery represents a query for the Index, where the user doesn't know the exact string +// representation of the item that is being searched for. The ItemQuery.Less function should +// function just as a "return me < than". 
However, when comparing an ItemString and an ItemQuery, +// the ItemQuery can fully decide the ordering, because the ItemString delegates the decision to +// the ItemQuery's QueryGTE function. This allows for flexible searching for items in the tree. +// +// When an ordering has been settled, e.g. ItemString1 < ItemQuery <= ItemString2 < ItemString3, and +// Index.Find() is called, ItemString2 will be returned (i.e. the "next item to the right"). +// When Index.List() is called, the iterator will be called for all it (ascending "to the +// right") for which ItemQuery.Matches(it) is true. +// +// ItemQueries are never persisted in the tree, they are only used for traversing the tree. +type ItemQuery interface { + AbstractItem + + // ItemQuery.QueryGTE(ItemString) is the same as (actually called from) ItemString.Less(ItemQuery). + QueryGTE(it ItemString) bool + // Matches returns true if the query matches the given item. It is used when iterating, after an + // ordering has been finalized. + Matches(it ItemString) bool +} + +// Item is the base type that is stored in the B-Tree. There are two main types of Items, ValueItems +// and indexed pointers to ValueItems. Hence, any Item points to a ValueItem in one way or an other. +type Item interface { + ItemString + // GetValueItem returns the ValueItem this Item points to (in the case of an index), or itself + // (in the case of Item already being a ValueItem). + GetValueItem() ValueItem +} + +// ItemIterator represents a callback function when iterating through a set of items in the tree. +// As long as true is returned, iteration continues. +type ItemIterator func(it Item) bool + +// ValueItem represents a mapped key to a value that is stored in the B-Tree. 
+type ValueItem interface { + Item + + // Key returns the key of this mapping + Key() interface{} + // Value returns the value of this mapping + Value() interface{} + // IndexedPtrs returns all indexed items that are pointing to this ValueItem + IndexedPtrs() []Item +} + +// Index represents one B-Tree that contains key-value mappings indexed by their key and possibly +// other fields. +type Index interface { + // Get returns an Item in the tree that matches exactly it (i.e. !it.Less(it2) && !it2.Less(it)) + // Both an ItemString (or higher) or an ItemQuery can be passed to this function. + Get(it AbstractItem) (Item, bool) + // Put inserts or overwrites the given ValueItem (including related indexes) in the underlying tree. + Put(it ValueItem) + // Delete deletes the ValueItem (and the related indexes) that is equal to it. True is returned if + // such an item actually existed in the tree and was deleted. + Delete(it AbstractItem) bool + + // Find returns the next item in ascending order, when the place for the ItemQuery q has been found as: + // Item1 < q <= Item2 < Item3 + // In this example, (Item2, true) would be returned, as long as q.Matches(Item2) == true. Otherwise, or + // if q is the maximum of the tree, (nil, false) is returned. + // See PrefixQuery and prefixPivotQuery for examples. + Find(q ItemQuery) (Item, bool) + // List returns the next items in ascending order, when the place for the ItemQuery q has been found as: + // Item1 < q <= Item2 < Item3 < Item4 + // In this example, it in [Item2, Item4] would be iterated, as long as q.Matches(it) == true. When false + // is returned from a match, iteration is stopped. + // See PrefixQuery and prefixPivotQuery for examples. + List(q ItemQuery, iterator ItemIterator) + + // Clear clears the B-Tree completely, but re-uses some nodes for better resource utilization. + // It does not disturb other trees that share the same Copy-on-Write base. + Clear() + + // Internal returns the underlying B-Tree. 
+ Internal() *btree.BTree +} + +type bTreeIndexImpl struct { + btree *btree.BTree + parentRef string +} + +// Get returns an Item in the tree that matches exactly it (i.e. !it.Less(it2) && !it2.Less(it)) +// Both an ItemString (or higher) or an ItemQuery can be passed to this function. +func (i *bTreeIndexImpl) Get(it btree.Item) (Item, bool) { + found := i.btree.Get(it) + if found != nil { + return found.(Item), true + } + return nil, false +} + +// Put inserts or overwrites the given ValueItem (including related indexes) in the underlying tree. +func (i *bTreeIndexImpl) Put(it ValueItem) { + // First, delete any previous, now stale, data related to this item + i.deleteIndexes(it) + // Add the item to the tree + i.btree.ReplaceOrInsert(it) + // Register all indexes of it, too + for _, idxPtr := range it.IndexedPtrs() { + i.btree.ReplaceOrInsert(idxPtr) + } +} + +// Delete deletes the ValueItem (and the related indexes) that is equal to it. True is returned if +// such an item actually existed in the tree and was deleted. 
+func (i *bTreeIndexImpl) Delete(it btree.Item) bool { + // deleteIndexes returns true if it exists (=> needs to be deleted) + if !i.deleteIndexes(it) { + return false // nothing to delete + } + + // Delete the item itself from the tree + i.btree.Delete(it) + return true +} + +// deleteIndexes deletes the indexes associated with it +// true is returned if the deletions were made, false +// if the item did not exist +func (i *bTreeIndexImpl) deleteIndexes(it btree.Item) bool { + // Deliberately Get the item first, to resolve the ValueItem it points to + found, ok := i.Get(it) + if !ok { + return false // nothing to delete, not found + } + + // Delete all indexes of it + for _, idxPtr := range found.GetValueItem().IndexedPtrs() { + i.btree.Delete(idxPtr) + } + return true +} + +// Find returns the next item in ascending order, when the place for the ItemQuery q has been found as: +// Item1 < q <= Item2 < Item3 +// In this example, (Item2, true) would be returned, as long as q.Matches(Item2) == true. Otherwise, or +// if q is the maximum of the tree, (nil, false) is returned. +func (i *bTreeIndexImpl) Find(q ItemQuery) (retit Item, found bool) { + i.list(q, func(it Item) bool { + retit = it + found = true + return false // only find one item + }) + return // retit, found +} + +// List returns the next items in ascending order, when the place for the ItemQuery q has been found as: +// Item1 < q <= Item2 < Item3 < Item4 +// In this example, it in [Item2, Item4] would be iterated, as long as q.Matches(it) == true. When false +// is returned from a match, iteration is stopped. 
+func (i *bTreeIndexImpl) List(q ItemQuery, iterator ItemIterator) { + i.list(q, iterator) +} + +func (i *bTreeIndexImpl) list(q ItemQuery, iterator ItemIterator) { + var ii Item // cache ii between iteration callbacks + i.btree.AscendGreaterOrEqual(q, func(i btree.Item) bool { + ii = i.(Item) + if !q.Matches(ii) { // make sure ii matches the query + return false + } + return iterator(ii) + }) +} + +func (i *bTreeIndexImpl) Internal() *btree.BTree { return i.btree } +func (i *bTreeIndexImpl) Clear() { i.btree.Clear(true) } + +// NewItemString returns a new ItemString for the given B-Tree key. +// Custom ValueItems should embed this ItemString to automatically get +// the expected sorting functionality. +func NewItemString(key string) ItemString { + return &itemString{key} +} + +// itemString implements ItemString +var _ ItemString = &itemString{} + +type itemString struct{ key string } + +// Less implements the sorting functionality described in the ItemString godoc. +// If this Item is compared to an ItemQuery, the ItemQuery should decide the ordering. +// If this Item is compared to a fellow ItemString, just use simple string comparison. +func (s *itemString) Less(item btree.Item) bool { + switch it := item.(type) { + case ItemQuery: + return it.QueryGTE(s) + case ItemString: + return s.key < it.String() + default: + panic("items must implement either ItemQuery or ItemString") + } +} +func (s *itemString) String() string { return s.key } + +// NewIndexedPtr returns a new Item that for the given key, points to +// the given ValueItem. This means fields of ValueItems can be indexed +// using the following key, and added to the B-Tree. ptr must be non-nil +// otherwise this function will panic. 
The key of the pointed-to item +// will be appended to the sort key as well +func NewIndexedPtr(key string, ptr *ValueItem) Item { + if ptr == nil { + panic("NewIndexedPtr: ptr must not be nil") + } + return &indexedPtr{NewItemString(key + ":" + (*ptr).String()), ptr} +} + +// indexedPtr implements Item. +var _ Item = &indexedPtr{} + +// indexedPtr extends the ItemString with the given pointer to the ValueItem. +type indexedPtr struct { + ItemString + ptr *ValueItem +} + +func (s *indexedPtr) GetValueItem() ValueItem { return *s.ptr } + +// PrefixQuery implements ItemQuery +var _ ItemQuery = PrefixQuery("") + +// PrefixQuery is an ItemQuery that matches all items with the given prefix. For a Find() the smallest +// item with the given prefix is returned. For a List() the items containing the prefix will be iterated +// in ascending order (from smallest to largest). +// Example: bar:xx < foo:aa:aa < foo:aa:bb < foo:bb:aa < xx:yy:zz +// Find("foo:aa") => "foo:aa:aa" +// List("foo:aa") => {"foo:aa:aa", "foo:aa:bb"} +// Find("foo:bb") => "foo:bb:aa" +// List("foo:bb") => {"foo:bb:aa"} +type PrefixQuery string + +func (s PrefixQuery) Less(item btree.Item) bool { return string(s) < item.(ItemString).String() } +func (s PrefixQuery) QueryGTE(it ItemString) bool { return it.String() < string(s) } + +func (s PrefixQuery) Matches(it ItemString) bool { + return strings.HasPrefix(it.String(), string(s)) +} + +// NewPrefixPivotQuery returns an ItemQuery that matches all items with the given Prefix, but starting +// the search for items that don't start with "Prefix+Pivot". A Find() returns the smallest item that does +// not have the "Prefix+Pivot" prefix, but still contains "Prefix". A List() starts iterating the tree +// in ascending order (from smallest to largest) from the item returned by Find(). Behavior is undefined +// if Prefix or Pivot (or both) is an empty string. 
+// +// Example: bar:xx < foo:aa:aa < foo:aa:bb < foo:bb:aa < foo:bb:cc < foo:cc:zz < xx:yy:zz +// Find(Prefix: "foo:", Pivot: "aa") => "foo:bb:aa" +// List(Prefix: "foo:", Pivot: "aa") => {"foo:bb:aa", "foo:bb:cc", "foo:cc:zz"} +// Find(Prefix: "foo:", Pivot: "bb") => "foo:cc:zz" +// List(Prefix: "foo:", Pivot: "bb") => {"foo:cc:zz"} +func NewPrefixPivotQuery(prefix, pivot string) ItemQuery { + return &prefixPivotQuery{Prefix: prefix, Pivot: pivot} +} + +// prefixPivotQuery implements ItemQuery +var _ ItemQuery = &prefixPivotQuery{} + +type prefixPivotQuery struct { + Prefix string + Pivot string +} + +func (s *prefixPivotQuery) key() string { return s.Prefix + s.Pivot } +func (s *prefixPivotQuery) Less(item btree.Item) bool { + itStr := item.(ItemString).String() + return s.key() < itStr && !strings.HasPrefix(itStr, s.key()) +} + +func (s *prefixPivotQuery) QueryGTE(it ItemString) bool { + b := it.String() < s.key() || strings.HasPrefix(it.String(), s.key()) + return b +} +func (s *prefixPivotQuery) Matches(it ItemString) bool { + return strings.HasPrefix(it.String(), s.Prefix) +} + +// NewStringStringItem returns a new mapping (between a string-encoded key and value), that +// can be stored in the B-Tree. Keys stored under the same "bucket" prefix together essentially +// form a "virtual" map[string]string within the B-Tree, but with e.g. copy-on-write support. +// Any extra indexed fields registered will point to this ValueItem. 
+func NewStringStringItem(bucket, key, value string, indexedFields ...string) ValueItem { + str := key + if len(bucket) != 0 { + str = bucket + ":" + key + } + kvItem := &kvValueItem{ + ItemString: NewItemString(str), + key: key, + value: value, + indexes: make([]Item, 0, len(indexedFields)), + } + var valit ValueItem = kvItem + for _, indexedField := range indexedFields { + kvItem.indexes = append(kvItem.indexes, NewIndexedPtr(indexedField, &valit)) + } + return kvItem +} + +type kvValueItem struct { + ItemString + key, value string + indexes []Item +} + +func (i *kvValueItem) GetValueItem() ValueItem { return i } // this is already a ValueItem +func (i *kvValueItem) Key() interface{} { return i.key } // just return the plain key +func (i *kvValueItem) Value() interface{} { return i.value } // just return the plain value +func (i *kvValueItem) IndexedPtrs() []Item { return i.indexes } // indexes from constructor diff --git a/pkg/storage/filesystem/unstructured/btree/btree_versioned_index.go b/pkg/storage/filesystem/unstructured/btree/btree_versioned_index.go new file mode 100644 index 00000000..fb133e39 --- /dev/null +++ b/pkg/storage/filesystem/unstructured/btree/btree_versioned_index.go @@ -0,0 +1,78 @@ +package btree + +import ( + "errors" + "fmt" + + "github.com/google/btree" +) + +var ( + ErrVersionRefNotFound = errors.New("version ref tree not found") + ErrVersionRefAlreadyExists = errors.New("version ref tree already exists") +) + +type VersionedIndex interface { + VersionedTree(ref string) (Index, bool) + NewVersionedTree(ref, base string) (Index, error) + DeleteVersionedTree(ref string) +} + +func NewVersionedIndex() VersionedIndex { + return &bTreeVersionedIndexImpl{ + indexes: make(map[string]Index), + freelist: btree.NewFreeList(btree.DefaultFreeListSize), + } +} + +type bTreeVersionedIndexImpl struct { + indexes map[string]Index + freelist *btree.FreeList +} + +func (i *bTreeVersionedIndexImpl) VersionedTree(ref string) (Index, bool) { + t, ok := 
i.indexes[ref] + return t, ok +} + +func (i *bTreeVersionedIndexImpl) NewVersionedTree(ref, base string) (Index, error) { + // Make sure ref already doesn't exist + _, ok := i.VersionedTree(ref) + if ok { + return nil, fmt.Errorf("%w: %s", ErrVersionRefAlreadyExists, ref) + } + + var t2 Index + if len(base) != 0 { + // Get the base versionref + t, ok := i.VersionedTree(base) + if !ok { + return nil, fmt.Errorf("%w: %s", ErrVersionRefNotFound, base) + } + // Clone the base BTree + t2 = &bTreeIndexImpl{btree: t.Internal().Clone(), parentRef: base} + } else { + // Create a new BTree with the shared freelist + t2 = newIndex(i.freelist) + } + // Register in the map + i.indexes[ref] = t2 + return t2, nil +} + +func (i *bTreeVersionedIndexImpl) DeleteVersionedTree(ref string) { + t, ok := i.VersionedTree(ref) + if ok { + // Move the nodes of the cow-part of the given BTree to the freelist for re-use + t.Internal().Clear(true) + } + // Just delete the index + delete(i.indexes, ref) +} + +func newIndex(freelist *btree.FreeList) Index { + if freelist == nil { + return &bTreeIndexImpl{btree: btree.New(32)} + } + return &bTreeIndexImpl{btree: btree.NewWithFreeList(32, freelist)} +} From 2844d8cdf6665c8a10a82cfd1627b26e8e68d8c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 24 Feb 2021 23:17:34 +0200 Subject: [PATCH 136/149] Add unit test for the btree index impls. 
--- .../unstructured/btree/btree_index_test.go | 277 ++++++++++++++++++ 1 file changed, 277 insertions(+) create mode 100644 pkg/storage/filesystem/unstructured/btree/btree_index_test.go diff --git a/pkg/storage/filesystem/unstructured/btree/btree_index_test.go b/pkg/storage/filesystem/unstructured/btree/btree_index_test.go new file mode 100644 index 00000000..f2a356f7 --- /dev/null +++ b/pkg/storage/filesystem/unstructured/btree/btree_index_test.go @@ -0,0 +1,277 @@ +package btree + +import ( + "reflect" + "strconv" + "testing" +) + +func Test_ItemString_Less_ItemString_QueryPrefix(t *testing.T) { + tests := []struct { + str string + than string + want bool + }{ + {"", "", false}, + {"", "foo", true}, + {"foo", "", false}, + {"a", "b", true}, + {"a:a", "a:b", true}, + {"a:c", "a:b", false}, + {"b:a", "a:b", false}, + {"id:Bar.foo.com", "path:sample-file.yaml", true}, + {"id:Bar.foo.com", "checksum:123", false}, + {"path:sample-file.yaml:key:Baz.foo.com:default:foo:sample-file.yaml", "path:sample-file.yaml:key:Bar.foo.com:custom:foo:sample-file.yaml", false}, + } + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + if got := NewItemString(tt.str).Less(NewItemString(tt.than)); got != tt.want { + t.Errorf("NewItemString.Less(NewItemString) = %v, want %v", got, tt.want) + } + if got := NewItemString(tt.str).Less(PrefixQuery(tt.than)); got != tt.want { + t.Errorf("NewItemString.Less(PrefixQuery) = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_ItemString_Less_QueryPrefixPivot(t *testing.T) { + tests := []struct { + str string + prefix, pivot string + want bool + }{ + {"", "foo", "", true}, + {"a", "b", "", true}, + {"a:a", "a:b", "", true}, + {"a:c", "a:b", "", false}, + {"b:a", "a:b", "", false}, + {"id:Bar.foo.com", "path:sample-file.yaml", "", true}, + {"id:Bar.foo.com", "checksum:123", "", false}, + {"path:sample-file.yaml:key:Baz.foo.com:default:foo:sample-file.yaml", 
"path:sample-file.yaml:key:Bar.foo.com:custom:foo:sample-file.yaml", "", false}, + // bar:xx < foo:aa:aa < foo:aa:bb < foo:bb:aa < foo:bb:cc < foo:cc:zz < xx:yy:zz + {"bar:xx", "foo:aa", "aa", true}, + {"foo:", "foo:aa", "aa", true}, + {"foo:aa:aa", "foo:", "aa", true}, + {"foo:aa:bb", "foo:", "aa", true}, + {"foo:bb:aa", "foo:", "aa", false}, + {"foo:cc:aa", "foo:", "aa", false}, + {"foo:bb:bb", "foo:", "bb", true}, + {"foo:cc:aa", "foo:", "bb", false}, + } + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + if got := NewItemString(tt.str).Less(NewPrefixPivotQuery(tt.prefix, tt.pivot)); got != tt.want { + t.Errorf("ItemString.Less() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_bTreeIndexImpl_Find(t *testing.T) { + exampleItems := []string{"bar:xx", "foo:aa:aa", "foo:aa:bb", "foo:bb:aa", "foo:bb:cc", "foo:cc:zz", "xx:yy:zz"} + tests := []struct { + items []string + q ItemQuery + wantItem string + wantFound bool + }{ + // Test cases for PrefixQuery: + { + items: exampleItems, + q: PrefixQuery(""), + wantItem: "bar:xx", + wantFound: true, + }, + { + // Find("foo:aa") => "foo:aa:aa" + items: exampleItems, + q: PrefixQuery("foo:aa"), + wantItem: "foo:aa:aa", + wantFound: true, + }, + { + // Find("foo:bb") => "foo:bb:aa" + items: exampleItems, + q: PrefixQuery("foo:bb"), + wantItem: "foo:bb:aa", + wantFound: true, + }, + // Test cases for PrefixPivotQuery: + { + // Find(Prefix: "foo:", Pivot: "aa") => "foo:bb:aa" + items: exampleItems, + q: NewPrefixPivotQuery("foo:", "aa"), + wantItem: "foo:bb:aa", + wantFound: true, + }, + { + // Find(Prefix: "foo:", Pivot: "bb") => "foo:cc:zz" + items: exampleItems, + q: NewPrefixPivotQuery("foo:", "bb"), + wantItem: "foo:cc:zz", + wantFound: true, + }, + } + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + i := newIndex(nil) + for _, item := range tt.items { + i.Put(NewStringStringItem("", item, "")) + } + gotItem, gotFound := i.Find(tt.q) + if gotItem.String() != 
tt.wantItem { + t.Errorf("bTreeIndexImpl.Find() gotRetit = %v, want %v", gotItem.String(), tt.wantItem) + } + if gotFound != tt.wantFound { + t.Errorf("bTreeIndexImpl.Find() gotFound = %v, want %v", gotFound, tt.wantFound) + } + }) + } +} + +func Test_Queries_List(t *testing.T) { + exampleItems := []string{"bar:xx", "foo:aa:aa", "foo:aa:bb", "foo:bb:aa", "foo:bb:cc", "foo:cc:zz", "xx:yy:zz"} + tests := []struct { + items []string + q ItemQuery + want []string + }{ + // Test cases for PrefixQuery: + { + items: exampleItems, + q: PrefixQuery(""), + want: exampleItems, + }, + { + // List("foo:aa") => {"foo:aa:aa", "foo:aa:bb"} + items: exampleItems, + q: PrefixQuery("foo:aa"), + want: exampleItems[1:3], + }, + { + // List("foo:bb") => {"foo:bb:aa", "foo:bb:cc"} + items: exampleItems, + q: PrefixQuery("foo:bb"), + want: exampleItems[3:5], + }, + // Test cases for PrefixPivotQuery: + { + // List(Prefix: "foo:", Pivot: "aa") => {"foo:bb:aa", "foo:bb:cc", "foo:cc:zz"} + items: exampleItems, + q: NewPrefixPivotQuery("foo:", "aa"), + want: exampleItems[3:6], + }, + { + // List(Prefix: "foo:", Pivot: "bb") => {"foo:cc:zz"} + items: exampleItems, + q: NewPrefixPivotQuery("foo:", "bb"), + want: exampleItems[5:6], + }, + } + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + i := newIndex(nil) + for _, item := range tt.items { + i.Put(NewStringStringItem("", item, "")) + } + got := make([]string, 0, len(tt.want)) + i.List(tt.q, func(it Item) bool { + got = append(got, it.String()) + return true + }) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("bTreeIndexImpl.List() got = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_index_List(t *testing.T) { + var ( + key1 = NewStringStringItem("id", "Bar.foo.com:default:foo", "sample-file.yaml", "path:sample-file.yaml") + key2 = NewStringStringItem("id", "Bar.foo.com:default:other-foo", "other-file.yaml", "path:other-file.yaml") + key3 = NewStringStringItem("id", "Bar.foo.com:custom:foo", 
"sample-file.yaml", "path:sample-file.yaml") + key4 = NewStringStringItem("id", "Baz.foo.com:default:foo", "sample-file.yaml", "path:sample-file.yaml") + ) + sampleInit := func(i Index) { + i.Put(key1) + i.Put(key2) + i.Put(key3) + i.Put(key4) + } + sampleCleanup := func(i Index) { + i.Delete(key1) + i.Delete(key2) + i.Delete(key3) + i.Delete(key4) + } + tests := []struct { + initFunc func(i Index) + cleanupFunc func(i Index) + prefix string + want []ValueItem + }{ + { + initFunc: sampleInit, + cleanupFunc: sampleCleanup, + prefix: "path", + want: []ValueItem{key2, key3, key1, key4}, // sorted in order of the index, i.e. the files, and THEN the actual values + }, + { + initFunc: sampleInit, + cleanupFunc: sampleCleanup, + prefix: "path:sample-file.yaml", + want: []ValueItem{key3, key1, key4}, + }, + { + initFunc: sampleInit, + cleanupFunc: sampleCleanup, + prefix: "id:Bar.foo.com", + want: []ValueItem{key3, key1, key2}, + }, + { + initFunc: sampleInit, + cleanupFunc: sampleCleanup, + prefix: "id:Baz.foo.com", + want: []ValueItem{key4}, + }, + { + initFunc: sampleInit, + cleanupFunc: sampleCleanup, + prefix: "id:Bar.foo.com:default", + want: []ValueItem{key1, key2}, + }, + { + initFunc: sampleInit, + cleanupFunc: sampleCleanup, + prefix: "id:Bar.foo.com:default:foo", + want: []ValueItem{key1}, + }, + } + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + btreeIndex := newIndex(nil) + tt.initFunc(btreeIndex) + wantStr := make([]string, 0, len(tt.want)) + for _, it := range tt.want { + wantStr = append(wantStr, it.String()) + } + + got := []string{} + btreeIndex.List(PrefixQuery(tt.prefix), func(it Item) bool { + got = append(got, it.GetValueItem().String()) + return true + }) + if !reflect.DeepEqual(got, wantStr) { + t.Errorf("got = %v, want %v", got, wantStr) + } + tt.cleanupFunc(btreeIndex) + if l := btreeIndex.Internal().Len(); l != 0 { + if !reflect.DeepEqual(got, wantStr) { + t.Errorf("expected clean tree, got len = %d", l) + } + } + }) + 
} +} From eb2a8a29cf462f6191dc625a43a0c49fdf304405 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 24 Feb 2021 23:26:55 +0200 Subject: [PATCH 137/149] Improve ListUnique greatly, remove the hacky AdvanceLastChar, and add unit tests and comments --- .../filesystem/unstructured/btree/utils.go | 69 +++++++------- .../unstructured/btree/utils_test.go | 94 +++++++++++++++++++ 2 files changed, 127 insertions(+), 36 deletions(-) create mode 100644 pkg/storage/filesystem/unstructured/btree/utils_test.go diff --git a/pkg/storage/filesystem/unstructured/btree/utils.go b/pkg/storage/filesystem/unstructured/btree/utils.go index 107d197d..7a7856dc 100644 --- a/pkg/storage/filesystem/unstructured/btree/utils.go +++ b/pkg/storage/filesystem/unstructured/btree/utils.go @@ -1,59 +1,56 @@ package btree -func GetValueString(index BTreeIndex, it Item) string { - it, ok := index.Get(it) +import "fmt" + +// GetValueString searches the Index for an element that is equal to the +// search parameter. The function tries to cast the ValueItem's Value +// to either a string or fmt.Stringer, whose value is then returned. If +// this is unsuccessful, or the item doesn't exist, an empty string is returned. +// If the search is successful, this function returns true. +func GetValueString(index Index, search AbstractItem) (string, bool) { + it, ok := index.Get(search) if !ok { - return "" + return "", false } valItem := it.GetValueItem() if valItem == nil { - return "" + return "", true } - return valItem.ValueString() -} - -// AdvanceLastChar sets the last character to the next available char, e.g. "Hello" -> "Hellp". -// This can be used when listing as a way to not use an inclusive start parameter. -func AdvanceLastChar(str string) string { - // TODO: if the last char already is 255, this should actually bump the second-last char, etc. 
- return str[:len(str)-1] + string(str[len(str)-1]+1) + switch s := valItem.Value().(type) { + case string: + return s, true + case fmt.Stringer: + return s.String(), true + } + return "", true } -// UniqueIterFunc is used in ListUnique -type UniqueIterFunc func(it ValueItem) (start string, exclusive bool) +// UniqueIterFunc is used in ListUnique. +type UniqueIterFunc func(it ValueItem) string // ListUnique traverses the index in ascending order for each item under prefix. -// However, when an item is matched, the UniqueIterFunc iterator decides where to +// However, when an item is matched, the UniqueIterFunc return value decides where to // start the search the next time. One possible implementation is to return the -// submatch (i.e. strings.TrimPrefix(it.Key(), prefix)) and set exclusive to true, -// which will make ListUnique skip all other "duplicate" items in the same prefix space. +// name of common part you don't want to see again (e.g. "aa:" in the example below), +// which will make ListUnique skip all other "duplicate" "foo:aa:*" items. // // Example: // index = {"bar:aa", "foo:aa:bb", "foo:aa:cc", "foo:aa:cc:dd", "foo:bb:cc", "foo:bb:dd", "foo:dd:ee"} // prefix = "foo:" // iterator returns exclusive == true, and strings.Split(it.Key)[1], e.g. "foo:aa:cc:dd" => "aa" // Then the following items will be visited: {"foo:aa:bb", "foo:bb:cc", "foo:dd:ee"} -func ListUnique(index BTreeIndex, prefix string, iterator UniqueIterFunc) { - start := "" // indicates what submatch string to start matching from (inclusive as per List() default behavior) - exclusive := false +func ListUnique(index Index, prefix string, iterator UniqueIterFunc) { + it, found := index.Find(PrefixQuery(prefix)) + if !found { + return + } + q := NewPrefixPivotQuery(prefix, iterator(it.GetValueItem())).(*prefixPivotQuery) + for { - // Traverse the list of all IDs in the system, but only read one ID at a time, then exit - // the iteration. 
Next time the iteration is started, "start" is forwarded so the list "jumps" - // all the duplicate items in between. - // The return value for a successful list is 1, but if it is 0 we know we have traversed all items - if index.List(prefix, start, func(it Item) bool { - // next round; start from the returned submatch - start, exclusive = iterator(it.GetValueItem()) - // If exclusive is true, this submatch will not be included in the next List call, as the last - // char is now advanced just slightly - if exclusive { - start = AdvanceLastChar(start) - } - // Always traverse just one object - return false - }) == 0 { - // Break when there are no more items under the prefix + it, found := index.Find(q) + if !found { break } + q.Pivot = iterator(it.GetValueItem()) } } diff --git a/pkg/storage/filesystem/unstructured/btree/utils_test.go b/pkg/storage/filesystem/unstructured/btree/utils_test.go new file mode 100644 index 00000000..51a18a49 --- /dev/null +++ b/pkg/storage/filesystem/unstructured/btree/utils_test.go @@ -0,0 +1,94 @@ +package btree + +import ( + "reflect" + "strconv" + "strings" + "testing" +) + +func TestListUnique(t *testing.T) { + allItems := []string{"bar:aa", "foo:aa:bb", "foo:aa:cc", "foo:aa:cc:dd", "foo:aaaa:bla", "foo:bb:cc", "foo:bb:dd", "foo:dd:ee", "xyz:foo"} + tests := []struct { + items []string + prefix string + withEndingSep bool + want []string + }{ + // Note the difference between these examples: + { + items: allItems, + prefix: "foo:", + withEndingSep: true, + want: []string{"foo:aa:bb", "foo:aaaa:bla", "foo:bb:cc", "foo:dd:ee"}, + }, + { + items: allItems, + prefix: "foo:", + withEndingSep: false, + want: []string{"foo:aa:bb", "foo:bb:cc", "foo:dd:ee"}, + }, + } + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + i := newIndex(nil) + for _, item := range tt.items { + i.Put(NewStringStringItem("", item, "")) + } + + endingSep := "" + if tt.withEndingSep { + endingSep = ":" + } + + got := make([]string, 0, 
len(tt.want)) + ListUnique(i, tt.prefix, func(it ValueItem) string { + str := it.GetValueItem().String() + got = append(got, str) + return strings.Split(strings.TrimPrefix(str, tt.prefix), ":")[0] + endingSep + }) + + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("TestListUnique() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetValueString(t *testing.T) { + tests := []struct { + key string + value string + search string + want string + found bool + }{ + { + key: "foo:bar", + value: "hello", + search: "foo:bar", + want: "hello", + found: true, + }, + { + key: "foo:bar", + value: "hello", + search: "notfound", + want: "", + found: false, + }, + } + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + i := newIndex(nil) + i.Put(NewStringStringItem("", tt.key, tt.value)) + got, found := GetValueString(i, PrefixQuery(tt.search)) + if got != tt.want { + t.Errorf("GetValueString() = %v, want %v", got, tt.want) + } + if found != tt.found { + t.Errorf("GetValueString() = %v, want %v", found, tt.found) + } + }) + } +} From c879b47ebfabc22e495e7c2782c9524e6bd0be4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 24 Feb 2021 23:30:26 +0200 Subject: [PATCH 138/149] Update the unstructured filefinder to use the latest BTree impl --- .../unstructured/filefinder_mapped.go | 124 ++++++++---------- 1 file changed, 54 insertions(+), 70 deletions(-) diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go index bd9c6e0e..b843795d 100644 --- a/pkg/storage/filesystem/unstructured/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -37,7 +37,7 @@ func NewGenericFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.Fi return &GenericFileFinder{ contentTyper: contentTyper, fs: fs, - index: btree.NewBTreeVersionedIndex(), + index: btree.NewVersionedIndex(), mu: &sync.RWMutex{}, } } @@ -56,7 +56,7 @@ 
type GenericFileFinder struct { contentTyper filesystem.ContentTyper fs filesystem.Filesystem - index btree.BTreeVersionedIndex + index btree.VersionedIndex // mu guards index mu *sync.RWMutex } @@ -69,7 +69,7 @@ func (f *GenericFileFinder) ContentTyper() filesystem.ContentTyper { return f.contentTyper } -func (f *GenericFileFinder) versionedIndex(ctx context.Context) (btree.BTreeIndex, error) { +func (f *GenericFileFinder) versionedIndex(ctx context.Context) (btree.Index, error) { i, ok := f.index.VersionedTree(core.GetVersionRef(ctx).Branch()) if ok { return i, nil @@ -90,12 +90,12 @@ func (f *GenericFileFinder) ObjectPath(ctx context.Context, id core.UnversionedO } // Lookup the BTree item for the given ID - p, ok := index.Get(newIDItem(id, "")) + p, ok := index.Get(queryObject(id)) if !ok { return "", utilerrs.NewAggregate([]error{ErrNotTracked, core.NewErrNotFound(id)}) } // Return the path - return p.GetValueItem().ValueString(), nil + return p.GetValueItem().Value().(string), nil } // ObjectsAt retrieves the ObjectIDs in the file with the given relative file path. 
@@ -119,10 +119,10 @@ func (f *GenericFileFinder) ObjectsAt(ctx context.Context, path string) (core.Un return idSet, nil } -func (f *GenericFileFinder) objectsAt(index btree.BTreeIndex, path string) core.UnversionedObjectIDSet { +func (f *GenericFileFinder) objectsAt(index btree.Index, path string) core.UnversionedObjectIDSet { // Traverse the objects belonging to the given path index ids := core.NewUnversionedObjectIDSet() - index.List(idPathIndexField+":"+path, "", func(it btree.Item) bool { + index.List(queryPath(path), func(it btree.Item) bool { // Insert each objectID belonging to that path into the set ids.Insert(it.GetValueItem().Key().(core.UnversionedObjectID)) return true @@ -150,12 +150,12 @@ func (f *GenericFileFinder) ListGroupKinds(ctx context.Context) ([]core.GroupKin gks := []core.GroupKind{} // List GroupKinds directly under "id:*" prefix := idField + ":" - // Extract the GroupKind from the visited item, and return the groupkind exclusively, so it + // Extract the GroupKind from the visited item, and return the groupkind, so it // won't be visited again - btree.ListUnique(index, prefix, func(it btree.ValueItem) (string, bool) { + btree.ListUnique(index, prefix, func(it btree.ValueItem) string { gk := it.Key().(core.UnversionedObjectID).GroupKind() gks = append(gks, gk) - return gk.String(), true + return gk.String() + ":" // note: important to return this, see btree/utils_test.go why }) return gks, nil } @@ -183,13 +183,13 @@ func (f *GenericFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKin nsSet := sets.NewString() // List namespaces under "id:{groupkind}:*" - prefix := idField + ":" + gk.String() + ":" + prefix := idForGroupKind(gk) // Extract the namespace from the visited item, and return the groupkind exclusively, so it // won't be visited again - btree.ListUnique(index, prefix, func(it btree.ValueItem) (string, bool) { + btree.ListUnique(index, prefix, func(it btree.ValueItem) string { ns := 
it.Key().(core.UnversionedObjectID).ObjectKey().Namespace nsSet.Insert(ns) - return ns, true + return ns + ":" // note: important to return this, see btree/utils_test.go why }) return nsSet, nil } @@ -211,9 +211,8 @@ func (f *GenericFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind } ids := core.NewUnversionedObjectIDSet() - // List ObjectIDs under this "folder" - base := idField + ":" + gk.String() + ":" + namespace + ":" - index.List(base, "", func(it btree.Item) bool { + // List ObjectIDs under "id:{groupkind}:{ns}:*" + index.List(queryNamespace(gk, namespace), func(it btree.Item) bool { ids.Insert(it.GetValueItem().Key().(core.UnversionedObjectID)) return true }) @@ -231,16 +230,7 @@ func (f *GenericFileFinder) ChecksumForPath(ctx context.Context, path string) (s if err != nil { return "", false } - return f.checksumForPath(index, path) -} - -func (f *GenericFileFinder) checksumForPath(index btree.BTreeIndex, path string) (string, bool) { - // Get the checksum for the given path at the given version - item, ok := index.Get(newChecksumItem(path, "")) - if !ok { - return "", false - } - return item.GetValueItem().Value().(ChecksumPath).Checksum, true + return btree.GetValueString(index, queryChecksum(path)) } // MoveFile moves an internal mapping from oldPath to newPath. 
moved == true if the oldPath @@ -271,7 +261,7 @@ func (f *GenericFileFinder) MoveFile(ctx context.Context, oldPath, newPath strin // a) getting the checksum for the old path // b) assigning that checksum to the new path // c) deleting the item for the old path - checksum, ok := f.checksumForPath(index, oldPath) + checksum, ok := btree.GetValueString(index, queryChecksum(oldPath)) if !ok { logrus.Error("MoveFile: Expected checksum to be available, but wasn't") // if this happens; newPath won't be mapped to any checksum; but nothing worse @@ -318,10 +308,10 @@ func (f *GenericFileFinder) SetMapping(ctx context.Context, state ChecksumPath, } // internal method; not using any mutex; caller's responsibility -func (f *GenericFileFinder) setIDsAtPath(index btree.BTreeIndex, path, checksum string, newIDs core.UnversionedObjectIDSet) (added, duplicates, removed core.UnversionedObjectIDSet) { +func (f *GenericFileFinder) setIDsAtPath(index btree.Index, path, checksum string, newIDs core.UnversionedObjectIDSet) (added, duplicates, removed core.UnversionedObjectIDSet) { // If there are no new ids, delete the checksum mapping if newIDs.Len() == 0 { - index.Delete(newChecksumItem(path, "")) + index.Delete(queryChecksum(path)) } else { // Update the checksum. index.Put(newChecksumItem(path, checksum)) @@ -344,7 +334,7 @@ func (f *GenericFileFinder) setIDsAtPath(index btree.BTreeIndex, path, checksum _ = added.ForEach(func(addedID core.UnversionedObjectID) error { itemToAdd := newIDItem(addedID, path) // Check if this ID already exists in some other file. TODO: Is the second check needed? 
- if otherFile := btree.GetValueString(index, itemToAdd); len(otherFile) != 0 && otherFile != path { + if otherFile, _ := btree.GetValueString(index, itemToAdd); len(otherFile) != 0 && otherFile != path { // If so; it is a duplicate; move it to duplicates added.Delete(addedID) duplicates.Insert(addedID) @@ -360,7 +350,7 @@ func (f *GenericFileFinder) setIDsAtPath(index btree.BTreeIndex, path, checksum // Remove the removed items _ = removed.ForEach(func(removedID core.UnversionedObjectID) error { - index.Delete(newIDItem(removedID, "")) + index.Delete(queryObject(removedID)) return nil }) @@ -433,7 +423,7 @@ func (f *GenericFileFinder) ResetMappings(ctx context.Context, m map[ChecksumPat // In the resulting mappings; no duplicates are allowed (to avoid "races" at random // between different duplicates otherwise) _ = duplicates.ForEach(func(id core.UnversionedObjectID) error { - index.Delete(newIDItem(id, "")) + index.Delete(queryObject(id)) return nil }) @@ -467,59 +457,53 @@ func (f *GenericFileFinder) DeleteVersionRef(head core.VersionRef) { f.index.DeleteVersionedTree(head.Branch()) } +func idForGroupKind(gk core.GroupKind) string { return idField + ":" + gk.String() + ":" } +func idForNamespace(gk core.GroupKind, ns string) string { return idForGroupKind(gk) + ns + ":" } +func queryNamespace(gk core.GroupKind, ns string) btree.ItemQuery { + return btree.PrefixQuery(idForNamespace(gk, ns)) +} + +func idForObject(id core.UnversionedObjectID) string { + return idForNamespace(id.GroupKind(), id.ObjectKey().Namespace) + id.ObjectKey().Name +} +func queryObject(id core.UnversionedObjectID) btree.ItemQuery { + return btree.PrefixQuery(idForObject(id)) +} + +func queryPath(path string) btree.ItemQuery { return btree.PrefixQuery(pathIdxField + ":" + path) } +func queryChecksum(path string) btree.ItemQuery { return btree.PrefixQuery(checksumField + ":" + path) } + +func newChecksumItem(path, checksum string) btree.ValueItem { + return 
btree.NewStringStringItem(checksumField, path, checksum) +} + func newIDItem(id core.UnversionedObjectID, path string) btree.ValueItem { - return &idItemImpl{id: id, path: path} + return &idItemImpl{ + ItemString: btree.NewItemString(idForObject(id)), + id: id, + path: path, + } } type idItemImpl struct { + btree.ItemString id core.UnversionedObjectID path string } const ( - idField = "id" - idPathIndexField = "path" - + idField = "id" + pathIdxField = "path" checksumField = "chk" ) -func (i *idItemImpl) Less(item btree.OriginalBTreeItem) bool { - return i.String() < item.(btree.Item).String() -} -func (i *idItemImpl) String() string { return idField + ":" + i.KeyString() } -func (i *idItemImpl) GetValueItem() btree.ValueItem { return i } -func (i *idItemImpl) GetUnversionedObjectID() core.UnversionedObjectID { return i.id } -func (i *idItemImpl) Value() interface{} { return i.path } -func (i *idItemImpl) ValueString() string { return i.path } - -func (i *idItemImpl) Key() interface{} { return i.id } -func (i *idItemImpl) KeyString() string { - // TODO: Cache this and the output of IndexedPtrs()? 
- return i.id.GroupKind().String() + ":" + i.id.ObjectKey().Namespace + ":" + i.id.ObjectKey().Name -} +func (i *idItemImpl) GetValueItem() btree.ValueItem { return i } +func (i *idItemImpl) Key() interface{} { return i.id } +func (i *idItemImpl) Value() interface{} { return i.path } func (i *idItemImpl) IndexedPtrs() []btree.Item { var self btree.ValueItem = i return []btree.Item{ - btree.NewIndexedPtr(&self, idPathIndexField+":"+i.path), + btree.NewIndexedPtr(pathIdxField+":"+i.path, &self), } } - -func newChecksumItem(path, checksum string) btree.ValueItem { - return &checksumItemImpl{ChecksumPath{Path: path, Checksum: checksum}} -} - -type checksumItemImpl struct { - ChecksumPath -} - -func (i *checksumItemImpl) Less(item btree.OriginalBTreeItem) bool { - return i.String() < item.(btree.Item).String() -} -func (i *checksumItemImpl) String() string { return checksumField + ":" + i.KeyString() } -func (i *checksumItemImpl) GetValueItem() btree.ValueItem { return i } -func (i *checksumItemImpl) Key() interface{} { return i.Path } -func (i *checksumItemImpl) KeyString() string { return i.Path } -func (i *checksumItemImpl) Value() interface{} { return i.ChecksumPath } -func (i *checksumItemImpl) ValueString() string { return i.Checksum } -func (i *checksumItemImpl) IndexedPtrs() []btree.Item { return nil } From b492c2d823fc02feea5a0e9d83a380d7ba181637 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 14 Apr 2021 14:15:51 +0300 Subject: [PATCH 139/149] Initial stab at a git interface internally --- .../transactional/distributed/git/git.go | 202 +++---------- .../transactional/distributed/git/gogit.go | 278 ++++++++++++++++++ .../distributed/git/gogit_test.go | 124 ++++++++ .../distributed/git/interfaces.go | 21 ++ .../transactional/distributed/git/options.go | 41 +++ ...2ba594b9d16312e7c923ff9ef09c65d7_README.md | 18 ++ ...3ff486debbf525c460797b144c5d641f_README.md | 156 ++++++++++ .../distributed/git/transport.go | 4 + 8 files 
changed, 686 insertions(+), 158 deletions(-) create mode 100644 pkg/storage/client/transactional/distributed/git/gogit.go create mode 100644 pkg/storage/client/transactional/distributed/git/gogit_test.go create mode 100644 pkg/storage/client/transactional/distributed/git/interfaces.go create mode 100644 pkg/storage/client/transactional/distributed/git/options.go create mode 100644 pkg/storage/client/transactional/distributed/git/testdata/19bdfaa92ba594b9d16312e7c923ff9ef09c65d7_README.md create mode 100644 pkg/storage/client/transactional/distributed/git/testdata/fb15f0063ff486debbf525c460797b144c5d641f_README.md diff --git a/pkg/storage/client/transactional/distributed/git/git.go b/pkg/storage/client/transactional/distributed/git/git.go index 4529d5a4..482e4110 100644 --- a/pkg/storage/client/transactional/distributed/git/git.go +++ b/pkg/storage/client/transactional/distributed/git/git.go @@ -7,12 +7,8 @@ import ( "io/ioutil" "os" "sync" - "time" "github.com/fluxcd/go-git-providers/gitprovider" - git "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" log "github.com/sirupsen/logrus" "github.com/weaveworks/libgitops/pkg/storage/client/transactional" "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed" @@ -24,38 +20,22 @@ var ( ErrNotStarted = errors.New("the LocalClone hasn't been started (and hence, cloned) yet") // ErrCannotWriteToReadOnly happens if you try to do a write operation for a non-authenticated Git repo. ErrCannotWriteToReadOnly = errors.New("the LocalClone is read-only, cannot write") + // ErrWorktreeClean happens if there are no modified files in the worktree when trying to create a commit. + ErrWorktreeClean = errors.New("there are no modified files, cannot create new commit") + // ErrWorktreeNotClean happens if there are modified files in the worktree when trying to create a new branch. 
+ ErrWorktreeNotClean = errors.New("there are uncommitted changes, cannot create new branch") ) -const ( - defaultBranch = "master" -) - -// LocalCloneOptions provides options for the LocalClone. -// TODO: Refactor this into the controller-runtime Options factory pattern. -type LocalCloneOptions struct { - Branch string // default "master" - - // Authentication method. If unspecified, this clone is read-only. - AuthMethod AuthMethod -} - -func (o *LocalCloneOptions) Default() { - if o.Branch == "" { - o.Branch = defaultBranch - } -} - // LocalClone is an implementation of both a Remote, and a BranchManager, for Git. var _ transactional.BranchManager = &LocalClone{} var _ distributed.Remote = &LocalClone{} // Create a new Remote and BranchManager implementation using Git. The repo is cloned immediately // in the constructor, you can use ctx to enforce a timeout for the clone. -func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts LocalCloneOptions) (*LocalClone, error) { +func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts ...Option) (*LocalClone, error) { log.Info("Initializing the Git repo...") - // Default the options - opts.Default() + o := defaultOpts().ApplyOptions(opts) // Create a temporary directory for the clone tmpDir, err := ioutil.TempDir("", "libgitops") @@ -66,7 +46,7 @@ func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts d := &LocalClone{ repoRef: repoRef, - opts: opts, + opts: o, cloneDir: tmpDir, lock: &sync.Mutex{}, } @@ -79,8 +59,8 @@ func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts log.Infof("Running in read-only mode, won't write status back to the repo") } - // Clone the repo - if err := d.clone(ctx); err != nil { + d.impl, err = NewGoGit(ctx, repoRef, tmpDir, o) + if err != nil { return nil, err } @@ -88,20 +68,22 @@ func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts } // LocalClone is an implementation 
of both a Remote, and a BranchManager, for Git. +// TODO: Make so that the LocalClone does NOT interfere with any reads or writes by the Client using some shared +// mutex. type LocalClone struct { // user-specified options repoRef gitprovider.RepositoryRef - opts LocalCloneOptions + opts *Options // the temporary directory used for the clone cloneDir string - // go-git objects. wt is the worktree of the repo, persistent during the lifetime of repo. - repo *git.Repository - wt *git.Worktree - // the lock for git operations (so no ops are done simultaneously) lock *sync.Mutex + + impl Interface + + // TODO: Keep track of current worktree branch } func (d *LocalClone) Dir() string { @@ -109,7 +91,7 @@ func (d *LocalClone) Dir() string { } func (d *LocalClone) MainBranch() string { - return d.opts.Branch + return d.opts.MainBranch } func (d *LocalClone) RepositoryRef() gitprovider.RepositoryRef { @@ -123,9 +105,9 @@ func (d *LocalClone) canWrite() bool { // verifyRead makes sure it's ok to start a read-something-from-git process func (d *LocalClone) verifyRead() error { // Safeguard against not starting yet - if d.wt == nil { + /*if d.wt == nil { return fmt.Errorf("cannot pull: %w", ErrNotStarted) - } + }*/ return nil } @@ -142,52 +124,6 @@ func (d *LocalClone) verifyWrite() error { return nil } -func (d *LocalClone) clone(ctx context.Context) error { - // Lock the mutex now that we're starting, and unlock it when exiting - d.lock.Lock() - defer d.lock.Unlock() - - cloneURL := d.repoRef.GetCloneURL(d.opts.AuthMethod.TransportType()) - - log.Infof("Starting to clone the repository %s", d.repoRef) - // Do a clone operation to the temporary directory - var err error - d.repo, err = git.PlainCloneContext(ctx, d.Dir(), false, &git.CloneOptions{ - URL: cloneURL, - Auth: d.opts.AuthMethod, - ReferenceName: plumbing.NewBranchReferenceName(d.opts.Branch), - SingleBranch: true, - NoCheckout: false, - //Depth: 1, // ref: https://github.com/src-d/go-git/issues/1143 - 
RecurseSubmodules: 0, - Progress: nil, - Tags: git.NoTags, - }) - // Handle errors - if errors.Is(err, context.DeadlineExceeded) { - return fmt.Errorf("git clone operation timed out: %w", err) - } else if errors.Is(err, context.Canceled) { - return fmt.Errorf("git clone was cancelled: %w", err) - } else if err != nil { - return fmt.Errorf("git clone error: %v", err) - } - - // Populate the worktree pointer - d.wt, err = d.repo.Worktree() - if err != nil { - return fmt.Errorf("git get worktree error: %v", err) - } - - // Get the latest HEAD commit and report it to the user - ref, err := d.repo.Head() - if err != nil { - return err - } - - log.Infof("Repo cloned; HEAD commit is %s", ref.Hash()) - return nil -} - func (d *LocalClone) Pull(ctx context.Context) error { // Lock the mutex now that we're starting, and unlock it when exiting d.lock.Lock() @@ -202,66 +138,26 @@ func (d *LocalClone) Pull(ctx context.Context) error { return err } - // Perform the git pull operation. The context carries a timeout - log.Trace("Starting pull operation") - err := d.wt.PullContext(ctx, &git.PullOptions{ - Auth: d.opts.AuthMethod, - SingleBranch: true, - }) - - // Handle errors - if errors.Is(err, git.NoErrAlreadyUpToDate) { - // all good, nothing more to do - log.Trace("Pull already up-to-date") - return nil - } else if errors.Is(err, context.DeadlineExceeded) { - return fmt.Errorf("git pull operation timed out: %w", err) - } else if errors.Is(err, context.Canceled) { - return fmt.Errorf("git pull was cancelled: %w", err) - } else if err != nil { - return fmt.Errorf("git pull error: %v", err) + if err := d.impl.Pull(ctx); err != nil { + return err } - log.Trace("Pulled successfully") - - // Get current HEAD - ref, err := d.repo.Head() + ref, err := d.impl.CommitAt(ctx, "") // HEAD if err != nil { return err } - log.Infof("New commit observed %s", ref.Hash()) + log.Infof("New commit observed %s", ref) return nil } func (d *LocalClone) Push(ctx context.Context) error { - // TODO: 
Push a specific branch only. Use opts.RefSpecs? - // Perform the git push operation. The context carries a timeout log.Debug("Starting push operation") - err := d.repo.PushContext(ctx, &git.PushOptions{ - Auth: d.opts.AuthMethod, - }) - - // Handle errors - if errors.Is(err, git.NoErrAlreadyUpToDate) { - // TODO: Is it good if there's nothing more to do; or a failure if there's nothing to push? - log.Trace("Push already up-to-date") - return nil - } else if errors.Is(err, context.DeadlineExceeded) { - return fmt.Errorf("git push operation timed out: %w", err) - } else if errors.Is(err, context.Canceled) { - return fmt.Errorf("git push was cancelled: %w", err) - } else if err != nil { - return fmt.Errorf("git push error: %v", err) - } - - log.Trace("Pushed successfully") - - return nil + return d.impl.Push(ctx, "") // TODO: only push the current branch } -func (d *LocalClone) CreateBranch(_ context.Context, branch string) error { +func (d *LocalClone) CreateBranch(ctx context.Context, branch string) error { // Lock the mutex now that we're starting, and unlock it when exiting d.lock.Lock() defer d.lock.Unlock() @@ -273,13 +169,18 @@ func (d *LocalClone) CreateBranch(_ context.Context, branch string) error { return err } - return d.wt.Checkout(&git.CheckoutOptions{ - Branch: plumbing.NewBranchReferenceName(branch), - Create: true, - }) + // Sanity-check that the worktree is clean before switching branches + if clean, err := d.impl.IsWorktreeClean(ctx); err != nil { + return err + } else if !clean { + return ErrWorktreeNotClean + } + + // Create and switch to the new branch + return d.impl.CheckoutBranch(ctx, branch, false, true) } -func (d *LocalClone) ResetToCleanBranch(_ context.Context, branch string) error { +func (d *LocalClone) ResetToCleanBranch(ctx context.Context, branch string) error { // Lock the mutex now that we're starting, and unlock it when exiting d.lock.Lock() defer d.lock.Unlock() @@ -289,17 +190,12 @@ func (d *LocalClone) ResetToCleanBranch(_ 
context.Context, branch string) error return err } - // Best-effort clean - _ = d.wt.Clean(&git.CleanOptions{ - Dir: true, - }) + // Best-effort clean, don't check the error + _ = d.impl.Clean(ctx) // Force-checkout the main branch // TODO: If a transaction (non-branched) was able to commit, and failed after that // we need to roll back that commit. - return d.wt.Checkout(&git.CheckoutOptions{ - Branch: plumbing.NewBranchReferenceName(branch), - Force: true, - }) + return d.impl.CheckoutBranch(ctx, branch, true, false) // TODO: Do a pull here too? } @@ -317,26 +213,16 @@ func (d *LocalClone) Commit(ctx context.Context, commit transactional.Commit) er return err } - s, err := d.wt.Status() - if err != nil { - return fmt.Errorf("git status failed: %v", err) - } - if s.IsClean() { - log.Debugf("No changed files in git repo, nothing to commit...") - // TODO: Should this be an error instead? - return nil + // Don't commit anything if already clean + if clean, err := d.impl.IsWorktreeClean(ctx); err != nil { + return err + } else if clean { + return ErrWorktreeClean } // Do a commit log.Debug("Committing all local changes") - hash, err := d.wt.Commit(commit.GetMessage().String(), &git.CommitOptions{ - All: true, - Author: &object.Signature{ - Name: commit.GetAuthor().GetName(), - Email: commit.GetAuthor().GetEmail(), - When: time.Now(), - }, - }) + hash, err := d.impl.Commit(ctx, commit) if err != nil { return fmt.Errorf("git commit error: %v", err) } diff --git a/pkg/storage/client/transactional/distributed/git/gogit.go b/pkg/storage/client/transactional/distributed/git/gogit.go new file mode 100644 index 00000000..46dfd16c --- /dev/null +++ b/pkg/storage/client/transactional/distributed/git/gogit.go @@ -0,0 +1,278 @@ +package git + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/fluxcd/go-git-providers/gitprovider" + git "github.com/go-git/go-git/v5" + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + 
"github.com/go-git/go-git/v5/plumbing/object" + log "github.com/sirupsen/logrus" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional" + "k8s.io/apimachinery/pkg/util/sets" +) + +func NewGoGit(ctx context.Context, repoRef gitprovider.RepositoryRef, dir string, opts *Options) (Interface, error) { + gg := &goGit{ + repoRef: repoRef, + dir: dir, + lock: &sync.Mutex{}, + opts: opts, + } + // Clone to populate repo & wt + if err := gg.clone(ctx); err != nil { + return nil, err + } + return gg, nil +} + +type goGit struct { + repoRef gitprovider.RepositoryRef + dir string + lock *sync.Mutex + opts *Options + + // go-git objects. wt is the worktree of the repo, persistent during the lifetime of repo. + repo *git.Repository + wt *git.Worktree +} + +func (g *goGit) clone(ctx context.Context) error { + // Lock the mutex now that we're starting, and unlock it when exiting + g.lock.Lock() + defer g.lock.Unlock() + + transportType := gitprovider.TransportTypeHTTPS // default + if g.opts.AuthMethod != nil { + // TODO: parse the URL instead + transportType = g.opts.AuthMethod.TransportType() + } + cloneURL := g.repoRef.GetCloneURL(transportType) + + cloneOpts := &git.CloneOptions{ + URL: cloneURL, + Auth: g.opts.AuthMethod, + SingleBranch: true, + NoCheckout: false, + //Depth: 1, // ref: https://github.com/go-git/go-git/issues/207 + RecurseSubmodules: 0, + Progress: nil, + Tags: git.NoTags, + } + if g.opts.MainBranch != "" { + cloneOpts.ReferenceName = plumbing.NewBranchReferenceName(g.opts.MainBranch) + } + + log.Infof("Starting to clone the repository %s", g.repoRef) + // Do a clone operation to the temporary directory + var err error + g.repo, err = git.PlainCloneContext(ctx, g.dir, false, cloneOpts) + // Handle errors + if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("git clone operation timed out: %w", err) + } else if errors.Is(err, context.Canceled) { + return fmt.Errorf("git clone was cancelled: %w", err) + } else if err != nil { + return 
fmt.Errorf("git clone error: %v", err) + } + + // Populate the worktree pointer + g.wt, err = g.repo.Worktree() + if err != nil { + return fmt.Errorf("git get worktree error: %v", err) + } + + // Get the latest HEAD commit and report it to the user + ref, err := g.repo.Head() + if err != nil { + return err + } + + log.Infof("Repo cloned; HEAD commit is %s", ref.Hash()) + return nil +} + +func (g *goGit) Pull(ctx context.Context) error { + // Perform the git pull operation. The context carries a timeout + log.Trace("Starting pull operation") + err := g.wt.PullContext(ctx, &git.PullOptions{ + Auth: g.opts.AuthMethod, + SingleBranch: true, + }) + + // Handle errors + if errors.Is(err, git.NoErrAlreadyUpToDate) { + // all good, nothing more to do + log.Trace("Pull already up-to-date") + return nil + } else if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("git pull operation timed out: %w", err) + } else if errors.Is(err, context.Canceled) { + return fmt.Errorf("git pull was cancelled: %w", err) + } else if err != nil { + return fmt.Errorf("git pull error: %v", err) + } + + log.Trace("Pulled successfully") + return nil +} + +func (g *goGit) Push(ctx context.Context, branchName string) error { + opts := &git.PushOptions{ + Auth: g.opts.AuthMethod, + } + // Only push the branch in question, if set + if branchName != "" { + opts.RefSpecs = sameRevisionRefSpecs(branchName) + } + + err := g.repo.PushContext(ctx, opts) + // Handle errors + if errors.Is(err, git.NoErrAlreadyUpToDate) { + // TODO: Is it good if there's nothing more to do; or a failure if there's nothing to push? 
+ log.Trace("Push already up-to-date") + return nil + } else if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("git push operation timed out: %w", err) + } else if errors.Is(err, context.Canceled) { + return fmt.Errorf("git push was cancelled: %w", err) + } else if err != nil { + return fmt.Errorf("git push error: %v", err) + } + + log.Trace("Pushed successfully") + return nil +} + +func (g *goGit) Fetch(ctx context.Context, revision string) error { + // Perform the git pull operation. The context carries a timeout + log.Trace("Starting pull operation") + err := g.repo.FetchContext(ctx, &git.FetchOptions{ + Auth: g.opts.AuthMethod, + // Fetch exactly this ref, and not others + RefSpecs: sameRevisionRefSpecs(revision), + }) + + // Handle errors + if errors.Is(err, git.NoErrAlreadyUpToDate) { + // all good, nothing more to do + log.Trace("Fetch already up-to-date") + return nil + } else if errors.Is(err, context.DeadlineExceeded) { + return fmt.Errorf("git fetch operation timed out: %w", err) + } else if errors.Is(err, context.Canceled) { + return fmt.Errorf("git fetch was cancelled: %w", err) + } else if err != nil { + return fmt.Errorf("git fetch error: %v", err) + } + + log.Trace("Fetched successfully") + return nil +} + +func (g *goGit) CheckoutBranch(ctx context.Context, branch string, force, create bool) error { + return g.wt.Checkout(&git.CheckoutOptions{ + Branch: plumbing.NewBranchReferenceName(branch), + Force: true, + Create: create, + }) +} + +func (g *goGit) Clean(_ context.Context) error { + // This is essentially a "git clean -f -d ." + return g.wt.Clean(&git.CleanOptions{ + Dir: true, + }) +} + +func (g *goGit) FilesChanged(ctx context.Context, fromCommit, toCommit string) (sets.String, error) { + from, err := g.repo.CommitObject(plumbing.NewHash(fromCommit)) + if err != nil { + return nil, err + } + //s, e := cA.Stats() + //s[0]. 
+ //ci, err := g.repo.CommitObjects() + ci, err := g.repo.Log(&git.LogOptions{ + From: plumbing.NewHash(toCommit), + Order: git.LogOrderCommitterTime, + Since: &from.Author.When, + }) + if err != nil { + return nil, err + } + files := sets.NewString() + err = ci.ForEach(func(c *object.Commit) error { + filesChanged, err := c.StatsContext(ctx) + if err != nil { + return err + } + for _, fileChanged := range filesChanged { + files.Insert(fileChanged.Name) + } + return nil + }) + return files, err +} + +func (g *goGit) Commit(_ context.Context, commit transactional.Commit) (string, error) { + hash, err := g.wt.Commit(commit.GetMessage().String(), &git.CommitOptions{ + All: true, + Author: &object.Signature{ + Name: commit.GetAuthor().GetName(), + Email: commit.GetAuthor().GetEmail(), + When: time.Now(), + }, + }) + return hash.String(), err +} +func (g *goGit) IsWorktreeClean(_ context.Context) (bool, error) { + s, err := g.wt.Status() + if err != nil { + return false, fmt.Errorf("git status failed: %v", err) + } + return s.IsClean(), nil +} + +func (g *goGit) ReadFileAtCommit(_ context.Context, commit string, file string) ([]byte, error) { + c, err := g.repo.CommitObject(plumbing.NewHash(commit)) + if err != nil { + return nil, err + } + f, err := c.File(file) + if err != nil { + return nil, err + } + content, err := f.Contents() + if err != nil { + return nil, err + } + return []byte(content), nil +} +func (g *goGit) CommitAt(_ context.Context, branch string) (rev string, err error) { + var reference *plumbing.Reference + if branch != "" { // Point at HEAD + reference, err = g.repo.Head() + } else { + reference, err = g.repo.Reference(plumbing.NewBranchReferenceName(branch), true) + } + if err != nil { + return + } + return reference.Hash().String(), nil +} + +// assume either the revision is a hash or a branch +func sameRevisionRefSpecs(revision string) []config.RefSpec { + if plumbing.IsHash(revision) { + revision = fmt.Sprintf("%s:%s", revision, revision) + } 
else { + revision = fmt.Sprintf("refs/heads/%s:refs/heads/%s", revision, revision) + } + return []config.RefSpec{config.RefSpec(revision)} +} diff --git a/pkg/storage/client/transactional/distributed/git/gogit_test.go b/pkg/storage/client/transactional/distributed/git/gogit_test.go new file mode 100644 index 00000000..8797e16e --- /dev/null +++ b/pkg/storage/client/transactional/distributed/git/gogit_test.go @@ -0,0 +1,124 @@ +package git + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "reflect" + "strings" + "testing" + + "github.com/fluxcd/go-git-providers/gitprovider" +) + +type filesChangedSubTest struct { + fromCommit string + toCommit string + want []string + wantErr bool +} + +type readFileSubTest struct { + commit string + file string + wantErr bool +} + +func Test_goGit(t *testing.T) { + tests := []struct { + name string + repoRef string + opts []Option + filesChanged []filesChangedSubTest + readFiles []readFileSubTest + }{ + { + name: "default", + repoRef: "https://github.com/weaveworks/libgitops", + filesChanged: []filesChangedSubTest{ + { + fromCommit: "5843c185b995e566fe245f7abb27f4c8cffcae71", + toCommit: "2e1789bf3be4cf03eb3b5b7d778f8cd6c39d40c7", + want: []string{ + "pkg/storage/transaction/git.go", + "pkg/storage/transaction/pullrequest/github/github.go", + "pkg/util/util.go", + }, + }, + { + fromCommit: "5843c185b995e566fe245f7abb27f4c8cffcae71", + toCommit: "5843c185b995e566fe245f7abb27f4c8cffcae71", + want: []string{"pkg/storage/transaction/pullrequest/github/github.go"}, + }, + }, + readFiles: []readFileSubTest{ + { + commit: "19bdfaa92ba594b9d16312e7c923ff9ef09c65d7", + file: "README.md", + }, + { + commit: "fb15f0063ff486debbf525c460797b144c5d641f", + file: "README.md", + }, + }, + }, + } + for i, tt := range tests { + t.Run(fmt.Sprintf("repo_%d", i), func(t *testing.T) { + d, err := ioutil.TempDir("", "") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(d) + ctx := context.Background() + repoRef, err := 
gitprovider.ParseOrgRepositoryURL(tt.repoRef) + if err != nil { + t.Fatal(err) + } + g, err := NewGoGit(ctx, repoRef, d, defaultOpts().ApplyOptions(tt.opts)) + if err != nil { + t.Fatal(err) + } + Subtest_filesChanged(t, g, tt.filesChanged) + Subtest_readFiles(t, g, tt.readFiles) + }) + } +} + +func Subtest_filesChanged(t *testing.T, g Interface, tests []filesChangedSubTest) { + ctx := context.Background() + for i, tt := range tests { + t.Run(fmt.Sprintf("filesChanged_%d", i), func(t *testing.T) { + got, err := g.FilesChanged(ctx, tt.fromCommit, tt.toCommit) + if (err != nil) != tt.wantErr { + t.Errorf("goGit.FilesChanged() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got.List(), tt.want) { + t.Errorf("goGit.FilesChanged() = %v, want %v", got, tt.want) + } + }) + } +} + +func Subtest_readFiles(t *testing.T, g Interface, tests []readFileSubTest) { + ctx := context.Background() + for i, tt := range tests { + t.Run(fmt.Sprintf("readFiles_%d", i), func(t *testing.T) { + got, err := g.ReadFileAtCommit(ctx, tt.commit, tt.file) + if (err != nil) != tt.wantErr { + t.Errorf("goGit.ReadFileAtCommit() error = %v, wantErr %v", err, tt.wantErr) + return + } + validateFile := fmt.Sprintf("testdata/%s_%s", tt.commit, strings.ReplaceAll(tt.file, "/", "_")) + want, err := ioutil.ReadFile(validateFile) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got, want) { + t.Errorf("goGit.ReadFileAtCommit() = %v, want %v", got, want) + } + }) + } +} diff --git a/pkg/storage/client/transactional/distributed/git/interfaces.go b/pkg/storage/client/transactional/distributed/git/interfaces.go new file mode 100644 index 00000000..357dfcc0 --- /dev/null +++ b/pkg/storage/client/transactional/distributed/git/interfaces.go @@ -0,0 +1,21 @@ +package git + +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/storage/client/transactional" + "k8s.io/apimachinery/pkg/util/sets" +) + +type Interface interface { + Pull(ctx context.Context) error + 
Fetch(ctx context.Context, revision string) error + Push(ctx context.Context, branchName string) error + CheckoutBranch(ctx context.Context, branchName string, force, create bool) error + Clean(ctx context.Context) error + FilesChanged(ctx context.Context, fromCommit, toCommit string) (sets.String, error) + Commit(ctx context.Context, commit transactional.Commit) (string, error) + IsWorktreeClean(ctx context.Context) (bool, error) + ReadFileAtCommit(ctx context.Context, commit string, file string) ([]byte, error) + CommitAt(ctx context.Context, branch string) (string, error) +} diff --git a/pkg/storage/client/transactional/distributed/git/options.go b/pkg/storage/client/transactional/distributed/git/options.go new file mode 100644 index 00000000..2613ca1a --- /dev/null +++ b/pkg/storage/client/transactional/distributed/git/options.go @@ -0,0 +1,41 @@ +package git + +type Options struct { + // default is autodetect, i.e. the clone is made without a branch + MainBranch string + + // Authentication method. If unspecified, this clone is read-only. 
+ AuthMethod AuthMethod +} + +func defaultOpts() *Options { + return &Options{} +} + +type Option interface { + ApplyTo(*Options) +} + +func (o *Options) ApplyToTx(target *Options) { + if o.MainBranch != "" { + target.MainBranch = o.MainBranch + } + if o.AuthMethod != nil { + target.AuthMethod = o.AuthMethod + } +} + +func (o *Options) ApplyOptions(opts []Option) *Options { + for _, opt := range opts { + opt.ApplyTo(o) + } + return o +} + +type Branch string + +func (b Branch) ApplyTo(target *Options) { + if b != "" { + target.MainBranch = string(b) + } +} diff --git a/pkg/storage/client/transactional/distributed/git/testdata/19bdfaa92ba594b9d16312e7c923ff9ef09c65d7_README.md b/pkg/storage/client/transactional/distributed/git/testdata/19bdfaa92ba594b9d16312e7c923ff9ef09c65d7_README.md new file mode 100644 index 00000000..4ffab9f4 --- /dev/null +++ b/pkg/storage/client/transactional/distributed/git/testdata/19bdfaa92ba594b9d16312e7c923ff9ef09c65d7_README.md @@ -0,0 +1,18 @@ +# Weave libgitops + +A set of packages to help build Git-backed applications. +Weave `libgitops` builds on top of the [Kubernetes API Machinery](https://github.com/kubernetes/apimachinery). + +## Getting Help + +If you have any questions about, feedback for or problems with `libgitops`: + +- Invite yourself to the [Weave Users Slack](https://slack.weave.works/). +- Ask a question on the [#general](https://weave-community.slack.com/messages/general/) Slack channel. +- [File an issue](https://github.com/weaveworks/libgitops/issues/new). + +Your feedback is always welcome! + +## Notes +This project was formerly called `gitops-toolkit`, but has now been given a more descriptive name. +If you've ended up here, you might be looking for the real [GitOps Toolkit](https://github.com/fluxcd/toolkit). 
\ No newline at end of file diff --git a/pkg/storage/client/transactional/distributed/git/testdata/fb15f0063ff486debbf525c460797b144c5d641f_README.md b/pkg/storage/client/transactional/distributed/git/testdata/fb15f0063ff486debbf525c460797b144c5d641f_README.md new file mode 100644 index 00000000..0aff5087 --- /dev/null +++ b/pkg/storage/client/transactional/distributed/git/testdata/fb15f0063ff486debbf525c460797b144c5d641f_README.md @@ -0,0 +1,156 @@ +# Weave libgitops + +A library of tools for manipulation and storage of Kubernetes-style objects with inbuilt GitOps functionality. +Weave `libgitops` builds on top of the [Kubernetes API Machinery](https://github.com/kubernetes/apimachinery). + +The library consists of several components, including (but not limited to): + +## YAML/JSON Serializer - `pkg/serializer` + +The libgitops `Serializer` is a powerful extension of the Kubernetes API Machinery serialization/manifest manipulation tools. + +It operates on Kubernetes `runtime.Object` compliant objects (types that implement `metav1.TypeMeta`), and focuses +on streamlining the user experience of dealing with encoding/decoding, versioning (GVKs), conversions and +defaulting. + +It also supports API types built with [controller-runtime](https://pkg.go.dev/sigs.k8s.io/controller-runtime/?tab=doc). 
+ +**Feature highlight:** + +- Preserving of Comments (even through conversions) +- Strict Decoding +- Multi-Frame Support (multiple documents in one file) +- Works with all Kubernetes-like objects + +**Example usage:** + +```go +// Create a serializer instance for Kubernetes types +s := serializer.NewSerializer(scheme.Scheme, nil) + +// Read all YAML documents, frame by frame, from STDIN +fr := serializer.NewYAMLFrameReader(os.Stdin) + +// Decode all YAML documents from the FrameReader to objects +objs, err := s.Decoder().DecodeAll(fr) + +// Write YAML documents, frame by frame, to STDOUT +fw := serializer.NewYAMLFrameWriter(os.Stdout) + +// Encode all objects as YAML documents, into the FrameWriter +err = s.Encoder().Encode(fw, objs...) +``` + +See the [`pkg/serializer`](pkg/serializer) package for details. + +**Note:** If you need to manipulate unstructured objects (not struct-backed, not `runtime.Object` compliant), the +[kyaml](https://pkg.go.dev/sigs.k8s.io/kustomize/kyaml@v0.6.0/yaml?tab=doc) library from kustomize may be a better fit. + +## The extended `runtime` - `pkg/runtime` + +The [`pkg/runtime`](pkg/runtime) package provides additional definitions and helpers around the upstream API Machinery +runtime. The most notable definition is the extended `runtime.Object` (from herein `pkg/runtime.Object`): + +```go +// Object is an union of the Object interfaces that are accessible for a +// type that embeds both metav1.TypeMeta and metav1.ObjectMeta. +type Object interface { + runtime.Object + metav1.ObjectMetaAccessor + metav1.Object +} +``` + +Any struct that embeds both `metav1.TypeMeta` and `metav1.ObjectMeta` inline, and has the automatically-generated +deep-copy code using the tag `// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object` will implement +`pkg/runtime.Object`. See an example in [cmd/sample-app/apis/sample](cmd/sample-app/apis/sample). 
+ +This extended `pkg/runtime.Object` is used heavily in the storage subsystem described below. + +## The storage system - `pkg/storage` + +The storage system is a collection of interfaces and reference implementations for storing Kubernetes-like objects +(that comply to the extended `pkg/runtime.Object` described above). It can be thought of as a database abstraction layer for objects based on how the interfaces are laid out. + +There are three "layers" of storages: + +### RawStorage interface + +The `RawStorage` interfaces deal with _bytes_, this includes `RawStorage` and `MappedRawStorage`. It is essentially a filesystem abstraction. + +- `GenericRawStorage` is a generic implementation of `RawStorage`, storing all objects as files on disk using the following path pattern: `///metadata.json`. +- `GenericMappedRawStorage` is a generic implementation of `MappedRawStorage`, keeping track of mappings between `ObjectKey`s and the real file path on disk. This might be used for e.g. a Git repository where the file structure and contents don't follow a specific format, but mappings need to be registered separately. + +### Storage interfaces + +"Generic" `Storage` interfaces deal with _objects_, this includes `Storage`, `TransactionStorage`, `WatchStorage` and `EventStorage`. + +- The `Storage` interface is a union of two smaller interfaces, `ReadStorage` and `WriteStorage`. It exposes CRUD operations like `Get`, `List`, `Create`, `Update`, `Delete`. +- `TransactionStorage` extends `ReadStorage` with a `Transaction` method, which temporarily gives access to also the `WriteStorage` part when the transaction is active. +- `EventStorage` allows the user to subscribe to object events arising from changes by other actors in the system, e.g. a new object was added, or that someone changed or deleted some other object. 
+ +### Storage implementations + +"High-level" `Storage` implementations bind together multiple `Storage`s, this includes `GenericWatchStorage`, `GitStorage` and `ManifestStorage`. + +- `GenericStorage` is a generic implementation of `Storage`, using the given `RawStorage` and `Serializer` to provide object operations to the user. +- `GenericWatchStorage` is an implementation of `EventStorage`, using inotify to watch a directory on disk. It sends update events to a registered channel. It is a superset of and extends a given `Storage`. +- `GitStorage` takes in a `GitDirectory` a `PullRequestProvider` and a `Serializer`. It watches for new commits automatically pulled by the `GitDirectory`, and re-syncs the underlying `GenericMappedRawStorage`. It implements the `TransactionStorage` interface, and when the transaction is active, allows writing which then yields a new branch and commit, pushed to the origin. Lastly, it can, using the `PullRequestProvider` create a Pull Request for the branch. In the future, it should also implement `EventStorage`. +- `ManifestStorage` watches a directory on disk using `GenericWatchStorage`, uses a `GenericStorage` for object operations, and a `MappedRawStorage` for files. Using it, implementing `EventStorage`, you can subscribe to file update/create/delete events in a given directory, e.g. a cloned Git repository or "manifest directory". + +**Example on how the storages interact:** + +![Storages on byte and object level](docs/images/storage_system_overview.png) + +![Example of TransactionStorage and EventStorage](docs/images/storage_system_transaction.png) + +See the [`pkg/storage`](pkg/storage) package for details. + +### The filtering framework - `pkg/filter` + +The filtering framework provides interfaces for `pkg/runtime.Object` filters and provides some basic filter +implementations. These are used in conjunction with storages when running `Storage.Find` and `Storage.List` calls. 
+ +There are two interfaces: + +- `ListFilter` describes a filter implementation that filters out objects from a given list, like a UNIX pipe. +- `ObjectFilter` describes a filter implementation returning a boolean for if a single given object is a match. + +There is an `ObjectToListFilter` helper provided for easily creating `ListFilter`s out of simpler `ObjectFilter`s. + +See the [`pkg/filter`](pkg/filter) package for details. + +### The GitDirectory - `pkg/gitdir` + +The `GitDirectory` is an abstraction layer for a temporary Git clone. It pulls and checks out new changes periodically +in the background. It allows high-level access to write operations like creating a new branch, committing, and pushing. + +It is currently utilizing some functionality from [go-git-providers](https://github.com/fluxcd/go-git-providers/), but +should be refactored to utilize it more thoroughly. See +[weaveworks/libgitops#38](https://github.com/weaveworks/libgitops/issues/38) for more details regarding the integration. + +See the [`pkg/gitdir`](pkg/gitdir) package for details. + +### Utilities - `pkg/util` + +This package contains utilities used by the rest of the library. The most interesting thing here is the `Patcher` +under [`pkg/util/patch`](pkg/util/patch), which can be used to apply patches to `pkg/runtime.Object` compliant types. + +## Getting Help + +If you have any questions about, feedback for or problems with `libgitops`: + +- Invite yourself to the [Weave Users Slack](https://slack.weave.works/). +- Ask a question on the [#general](https://weave-community.slack.com/messages/general/) Slack channel. +- [File an issue](https://github.com/weaveworks/libgitops/issues/new). + +Your feedback is always welcome! + +## Maintainers + +- Chanwit Kaewkasi, [@chanwit](https://github.com/chanwit) + +## Notes + +This project was formerly called `gitops-toolkit`, but has now been given a more descriptive name. 
+If you've ended up here, you might be looking for the real [GitOps Toolkit](https://github.com/fluxcd/toolkit). diff --git a/pkg/storage/client/transactional/distributed/git/transport.go b/pkg/storage/client/transactional/distributed/git/transport.go index 3017853a..95866999 100644 --- a/pkg/storage/client/transactional/distributed/git/transport.go +++ b/pkg/storage/client/transactional/distributed/git/transport.go @@ -71,3 +71,7 @@ type authMethod struct { func (a *authMethod) TransportType() gitprovider.TransportType { return a.t } + +func (a *authMethod) ApplyTo(target *Options) { + target.AuthMethod = a +} From 72aa73010c2e1b67b60899ffcf6f30341913fd37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 23 Jun 2021 19:52:29 +0300 Subject: [PATCH 140/149] WIP --- .gitignore | 6 +- cmd/sample-gitops/main.go | 9 +- foo.diff | 14208 ++++++++++++++++ go.mod | 40 +- go.sum | 390 +- pkg/storage/client/transactional/client.go | 188 +- .../transactional/distributed/client.go | 35 +- .../distributed/git/filesystem.go | 66 + .../transactional/distributed/git/git.go | 12 +- .../transactional/distributed/git/gogit.go | 8 +- .../distributed/git/transport.go | 2 + pkg/storage/client/transactional/handlers.go | 14 +- .../client/transactional/interfaces.go | 29 +- pkg/storage/client/transactional/options.go | 12 +- pkg/storage/client/transactional/tx_branch.go | 23 +- pkg/storage/client/transactional/tx_common.go | 2 +- pkg/storage/client/transactional/tx_ops.go | 2 + pkg/storage/client/transactional/utils.go | 6 + pkg/storage/core/interfaces.go | 23 +- pkg/storage/core/versionref.go | 78 +- pkg/storage/filesystem/filesystem.go | 7 + pkg/storage/filesystem/storage.go | 4 + .../unstructured/btree/btree_cache_test.go | 84 + pkg/storage/filesystem/unstructured/tx/tx.go | 4 +- pkg/storage/interfaces.go | 1 + pkg/util/sync/lock.go | 107 + 26 files changed, 15111 insertions(+), 249 deletions(-) create mode 100644 foo.diff create mode 100644 
pkg/storage/client/transactional/distributed/git/filesystem.go create mode 100644 pkg/storage/filesystem/unstructured/btree/btree_cache_test.go create mode 100644 pkg/util/sync/lock.go diff --git a/.gitignore b/.gitignore index ae3df758..f5a3ec30 100644 --- a/.gitignore +++ b/.gitignore @@ -3,4 +3,8 @@ .vscode/ # Binary artifacts -bin/ \ No newline at end of file +bin/ +main_test.go +vendor +*.txt +old diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go index 1657c906..2cee498c 100644 --- a/cmd/sample-gitops/main.go +++ b/cmd/sample-gitops/main.go @@ -138,15 +138,12 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str defer func() { cancel() }() // Construct the LocalClone implementation which backs the storage - localClone, err := git.NewLocalClone(ctx, repoRef, git.LocalCloneOptions{ - Branch: "master", - AuthMethod: authMethod, - }) + localClone, err := git.NewLocalClone(ctx, repoRef, authMethod, git.Branch("master")) if err != nil { return err } - ctx = core.WithVersionRef(ctx, core.NewBranchRef(localClone.MainBranch())) + ctx = core.WithMutableVersionRef(ctx, localClone.MainBranch()) // Just use default encoders and decoders encoder := scheme.Serializer.Encoder() @@ -226,7 +223,7 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone str list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList")) /*if br := c.QueryParam("branch"); len(br) != 0 { - ctx = core.WithVersionRef(ctx, core.NewBranchRef(br)) + ctx = core.WithVersionRef(ctx, core.NewMutableVersionRef(br)) }*/ if err := txClient.List(ctx, list); err != nil { diff --git a/foo.diff b/foo.diff new file mode 100644 index 00000000..d63a4c49 --- /dev/null +++ b/foo.diff @@ -0,0 +1,14208 @@ +diff --git a/Makefile b/Makefile +index 4f3230b..1f2c88b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,5 +1,5 @@ + UID_GID ?= $(shell id -u):$(shell id -g) +-GO_VERSION ?= 1.14.4 ++GO_VERSION ?= 1.15.6 + GIT_VERSION := $(shell 
hack/ldflags.sh --version-only) + PROJECT := github.com/weaveworks/libgitops + BOUNDING_API_DIRS := ${PROJECT}/cmd/apis/sample +@@ -7,7 +7,6 @@ API_DIRS := ${PROJECT}/cmd/sample-app/apis/sample,${PROJECT}/cmd/sample-app/apis + SRC_PKGS := cmd pkg + DOCKER_ARGS := --rm + CACHE_DIR := $(shell pwd)/bin/cache +-API_DOCS := api/sample-app.md api/runtime.md + BINARIES := bin/sample-app bin/sample-gitops bin/sample-watch + + # If we're not running in CI, run Docker interactively +@@ -39,7 +38,6 @@ test-internal: + tidy: docker-tidy-internal + tidy-internal: /go/bin/goimports + go mod tidy +- hack/generate-client.sh + gofmt -s -w ${SRC_PKGS} + goimports -w ${SRC_PKGS} + +diff --git a/cmd/common/common.go b/cmd/common/common.go +index dcba7c6..f011dac 100644 +--- a/cmd/common/common.go ++++ b/cmd/common/common.go +@@ -13,8 +13,8 @@ import ( + "github.com/spf13/pflag" + "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" + "github.com/weaveworks/libgitops/cmd/sample-app/version" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/storage" ++ "github.com/weaveworks/libgitops/pkg/storage/client" ++ "github.com/weaveworks/libgitops/pkg/storage/core" + ) + + var ( +@@ -25,10 +25,6 @@ func init() { + rand.Seed(time.Now().UnixNano()) + } + +-func CarKeyForName(name string) storage.ObjectKey { +- return storage.NewObjectKey(storage.NewKindKey(CarGVK), runtime.NewIdentifier("default/"+name)) +-} +- + func NewCar(name string) *v1alpha1.Car { + obj := &v1alpha1.Car{} + obj.Name = name +@@ -38,17 +34,17 @@ func NewCar(name string) *v1alpha1.Car { + return obj + } + +-func SetNewCarStatus(s storage.Storage, key storage.ObjectKey) error { +- obj, err := s.Get(key) ++func SetNewCarStatus(ctx context.Context, c client.Client, name string) error { ++ car := &v1alpha1.Car{} ++ err := c.Get(ctx, core.ObjectKey{Name: name}, car) + if err != nil { + return err + } + +- car := obj.(*v1alpha1.Car) + car.Status.Distance = rand.Uint64() + 
car.Status.Speed = rand.Float64() * 100 + +- return s.Update(car) ++ return c.Update(ctx, car) + } + + func ParseVersionFlag() { +@@ -75,8 +71,8 @@ func NewEcho() *echo.Echo { + func StartEcho(e *echo.Echo) error { + // Start the server + go func() { +- if err := e.Start(":8888"); err != nil { +- e.Logger.Info("shutting down the server") ++ if err := e.Start(":8881"); err != nil { ++ e.Logger.Info("shutting down the server", err) + } + }() + +diff --git a/pkg/logs/flag/flag.go b/cmd/common/logs/flag/flag.go +similarity index 84% +rename from pkg/logs/flag/flag.go +rename to cmd/common/logs/flag/flag.go +index 3c226cf..83f5967 100644 +--- a/pkg/logs/flag/flag.go ++++ b/cmd/common/logs/flag/flag.go +@@ -5,6 +5,9 @@ import ( + "github.com/spf13/pflag" + ) + ++// TODO: Use these flags in the sample binaries? ++// TODO: Move to the way controller-runtime does logs instead? ++ + type LogLevelFlag struct { + value *logrus.Level + } +diff --git a/pkg/logs/logs.go b/cmd/common/logs/logs.go +similarity index 95% +rename from pkg/logs/logs.go +rename to cmd/common/logs/logs.go +index 1ca78f1..c5b11a8 100644 +--- a/pkg/logs/logs.go ++++ b/cmd/common/logs/logs.go +@@ -8,6 +8,8 @@ import ( + log "github.com/sirupsen/logrus" + ) + ++// TODO: Move to the way controller-runtime does logs instead? ++ + // Quiet specifies whether to only print machine-readable IDs + var Quiet bool + +diff --git a/cmd/sample-app/client/client.go b/cmd/sample-app/client/client.go +deleted file mode 100644 +index e4d9824..0000000 +--- a/cmd/sample-app/client/client.go ++++ /dev/null +@@ -1,61 +0,0 @@ +-// TODO: Docs +- +-// +build ignore +- +-package client +- +-import ( +- api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample" +- "github.com/weaveworks/libgitops/pkg/client" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/storage" +- +- "k8s.io/apimachinery/pkg/runtime/schema" +-) +- +-// TODO: Autogenerate this! 
+- +-// NewClient creates a client for the specified storage +-func NewClient(s storage.Storage) *Client { +- return &Client{ +- SampleInternalClient: NewSampleInternalClient(s), +- } +-} +- +-// Client is a struct providing high-level access to objects in a storage +-// The resource-specific client interfaces are automatically generated based +-// off client_resource_template.go. The auto-generation can be done with hack/client.sh +-// At the moment SampleInternalClient is the default client. If more than this client +-// is created in the future, the SampleInternalClient will be accessible under +-// Client.SampleInternal() instead. +-type Client struct { +- *SampleInternalClient +-} +- +-func NewSampleInternalClient(s storage.Storage) *SampleInternalClient { +- return &SampleInternalClient{ +- storage: s, +- dynamicClients: map[schema.GroupVersionKind]client.DynamicClient{}, +- gv: api.SchemeGroupVersion, +- } +-} +- +-type SampleInternalClient struct { +- storage storage.Storage +- gv schema.GroupVersion +- carClient CarClient +- motorcycleClient MotorcycleClient +- dynamicClients map[schema.GroupVersionKind]client.DynamicClient +-} +- +-// Dynamic returns the DynamicClient for the Client instance, for the specific kind +-func (c *SampleInternalClient) Dynamic(kind runtime.Kind) (dc client.DynamicClient) { +- var ok bool +- gvk := c.gv.WithKind(kind.Title()) +- if dc, ok = c.dynamicClients[gvk]; !ok { +- dc = client.NewDynamicClient(c.storage, gvk) +- c.dynamicClients[gvk] = dc +- } +- +- return +-} +diff --git a/cmd/sample-app/client/zz_generated.client_car.go b/cmd/sample-app/client/zz_generated.client_car.go +deleted file mode 100644 +index 2661d45..0000000 +--- a/cmd/sample-app/client/zz_generated.client_car.go ++++ /dev/null +@@ -1,152 +0,0 @@ +-// +build ignore +- +-/* +- Note: This file is autogenerated! Do not edit it manually! +- Edit client_car_template.go instead, and run +- hack/generate-client.sh afterwards. 
+-*/ +- +-package client +- +-import ( +- "fmt" +- +- api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample" +- +- log "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/storage" +- "github.com/weaveworks/libgitops/pkg/storage/filterer" +- "k8s.io/apimachinery/pkg/runtime/schema" +-) +- +-// CarClient is an interface for accessing Car-specific API objects +-type CarClient interface { +- // New returns a new Car +- New() *api.Car +- // Get returns the Car matching given UID from the storage +- Get(runtime.UID) (*api.Car, error) +- // Set saves the given Car into persistent storage +- Set(*api.Car) error +- // Patch performs a strategic merge patch on the object with +- // the given UID, using the byte-encoded patch given +- Patch(runtime.UID, []byte) error +- // Find returns the Car matching the given filter, filters can +- // match e.g. the Object's Name, UID or a specific property +- Find(filter filterer.BaseFilter) (*api.Car, error) +- // FindAll returns multiple Cars matching the given filter, filters can +- // match e.g. 
the Object's Name, UID or a specific property +- FindAll(filter filterer.BaseFilter) ([]*api.Car, error) +- // Delete deletes the Car with the given UID from the storage +- Delete(uid runtime.UID) error +- // List returns a list of all Cars available +- List() ([]*api.Car, error) +-} +- +-// Cars returns the CarClient for the Client object +-func (c *SampleInternalClient) Cars() CarClient { +- if c.carClient == nil { +- c.carClient = newCarClient(c.storage, c.gv) +- } +- +- return c.carClient +-} +- +-// carClient is a struct implementing the CarClient interface +-// It uses a shared storage instance passed from the Client together with its own Filterer +-type carClient struct { +- storage storage.Storage +- filterer *filterer.Filterer +- gvk schema.GroupVersionKind +-} +- +-// newCarClient builds the carClient struct using the storage implementation and a new Filterer +-func newCarClient(s storage.Storage, gv schema.GroupVersion) CarClient { +- return &carClient{ +- storage: s, +- filterer: filterer.NewFilterer(s), +- gvk: gv.WithKind(api.KindCar.Title()), +- } +-} +- +-// New returns a new Object of its kind +-func (c *carClient) New() *api.Car { +- log.Tracef("Client.New; GVK: %v", c.gvk) +- obj, err := c.storage.New(c.gvk) +- if err != nil { +- panic(fmt.Sprintf("Client.New must not return an error: %v", err)) +- } +- return obj.(*api.Car) +-} +- +-// Find returns a single Car based on the given Filter +-func (c *carClient) Find(filter filterer.BaseFilter) (*api.Car, error) { +- log.Tracef("Client.Find; GVK: %v", c.gvk) +- object, err := c.filterer.Find(c.gvk, filter) +- if err != nil { +- return nil, err +- } +- +- return object.(*api.Car), nil +-} +- +-// FindAll returns multiple Cars based on the given Filter +-func (c *carClient) FindAll(filter filterer.BaseFilter) ([]*api.Car, error) { +- log.Tracef("Client.FindAll; GVK: %v", c.gvk) +- matches, err := c.filterer.FindAll(c.gvk, filter) +- if err != nil { +- return nil, err +- } +- +- results := 
make([]*api.Car, 0, len(matches)) +- for _, item := range matches { +- results = append(results, item.(*api.Car)) +- } +- +- return results, nil +-} +- +-// Get returns the Car matching given UID from the storage +-func (c *carClient) Get(uid runtime.UID) (*api.Car, error) { +- log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk) +- object, err := c.storage.Get(c.gvk, uid) +- if err != nil { +- return nil, err +- } +- +- return object.(*api.Car), nil +-} +- +-// Set saves the given Car into the persistent storage +-func (c *carClient) Set(car *api.Car) error { +- log.Tracef("Client.Set; UID: %q, GVK: %v", car.GetUID(), c.gvk) +- return c.storage.Set(c.gvk, car) +-} +- +-// Patch performs a strategic merge patch on the object with +-// the given UID, using the byte-encoded patch given +-func (c *carClient) Patch(uid runtime.UID, patch []byte) error { +- return c.storage.Patch(c.gvk, uid, patch) +-} +- +-// Delete deletes the Car from the storage +-func (c *carClient) Delete(uid runtime.UID) error { +- log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk) +- return c.storage.Delete(c.gvk, uid) +-} +- +-// List returns a list of all Cars available +-func (c *carClient) List() ([]*api.Car, error) { +- log.Tracef("Client.List; GVK: %v", c.gvk) +- list, err := c.storage.List(c.gvk) +- if err != nil { +- return nil, err +- } +- +- results := make([]*api.Car, 0, len(list)) +- for _, item := range list { +- results = append(results, item.(*api.Car)) +- } +- +- return results, nil +-} +diff --git a/cmd/sample-app/client/zz_generated.client_motorcycle.go b/cmd/sample-app/client/zz_generated.client_motorcycle.go +deleted file mode 100644 +index 7256e00..0000000 +--- a/cmd/sample-app/client/zz_generated.client_motorcycle.go ++++ /dev/null +@@ -1,152 +0,0 @@ +-// +build ignore +- +-/* +- Note: This file is autogenerated! Do not edit it manually! +- Edit client_motorcycle_template.go instead, and run +- hack/generate-client.sh afterwards. 
+-*/ +- +-package client +- +-import ( +- "fmt" +- +- api "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample" +- +- log "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/storage" +- "github.com/weaveworks/libgitops/pkg/storage/filterer" +- "k8s.io/apimachinery/pkg/runtime/schema" +-) +- +-// MotorcycleClient is an interface for accessing Motorcycle-specific API objects +-type MotorcycleClient interface { +- // New returns a new Motorcycle +- New() *api.Motorcycle +- // Get returns the Motorcycle matching given UID from the storage +- Get(runtime.UID) (*api.Motorcycle, error) +- // Set saves the given Motorcycle into persistent storage +- Set(*api.Motorcycle) error +- // Patch performs a strategic merge patch on the object with +- // the given UID, using the byte-encoded patch given +- Patch(runtime.UID, []byte) error +- // Find returns the Motorcycle matching the given filter, filters can +- // match e.g. the Object's Name, UID or a specific property +- Find(filter filterer.BaseFilter) (*api.Motorcycle, error) +- // FindAll returns multiple Motorcycles matching the given filter, filters can +- // match e.g. 
the Object's Name, UID or a specific property +- FindAll(filter filterer.BaseFilter) ([]*api.Motorcycle, error) +- // Delete deletes the Motorcycle with the given UID from the storage +- Delete(uid runtime.UID) error +- // List returns a list of all Motorcycles available +- List() ([]*api.Motorcycle, error) +-} +- +-// Motorcycles returns the MotorcycleClient for the Client object +-func (c *SampleInternalClient) Motorcycles() MotorcycleClient { +- if c.motorcycleClient == nil { +- c.motorcycleClient = newMotorcycleClient(c.storage, c.gv) +- } +- +- return c.motorcycleClient +-} +- +-// motorcycleClient is a struct implementing the MotorcycleClient interface +-// It uses a shared storage instance passed from the Client together with its own Filterer +-type motorcycleClient struct { +- storage storage.Storage +- filterer *filterer.Filterer +- gvk schema.GroupVersionKind +-} +- +-// newMotorcycleClient builds the motorcycleClient struct using the storage implementation and a new Filterer +-func newMotorcycleClient(s storage.Storage, gv schema.GroupVersion) MotorcycleClient { +- return &motorcycleClient{ +- storage: s, +- filterer: filterer.NewFilterer(s), +- gvk: gv.WithKind(api.KindMotorcycle.Title()), +- } +-} +- +-// New returns a new Object of its kind +-func (c *motorcycleClient) New() *api.Motorcycle { +- log.Tracef("Client.New; GVK: %v", c.gvk) +- obj, err := c.storage.New(c.gvk) +- if err != nil { +- panic(fmt.Sprintf("Client.New must not return an error: %v", err)) +- } +- return obj.(*api.Motorcycle) +-} +- +-// Find returns a single Motorcycle based on the given Filter +-func (c *motorcycleClient) Find(filter filterer.BaseFilter) (*api.Motorcycle, error) { +- log.Tracef("Client.Find; GVK: %v", c.gvk) +- object, err := c.filterer.Find(c.gvk, filter) +- if err != nil { +- return nil, err +- } +- +- return object.(*api.Motorcycle), nil +-} +- +-// FindAll returns multiple Motorcycles based on the given Filter +-func (c *motorcycleClient) FindAll(filter 
filterer.BaseFilter) ([]*api.Motorcycle, error) { +- log.Tracef("Client.FindAll; GVK: %v", c.gvk) +- matches, err := c.filterer.FindAll(c.gvk, filter) +- if err != nil { +- return nil, err +- } +- +- results := make([]*api.Motorcycle, 0, len(matches)) +- for _, item := range matches { +- results = append(results, item.(*api.Motorcycle)) +- } +- +- return results, nil +-} +- +-// Get returns the Motorcycle matching given UID from the storage +-func (c *motorcycleClient) Get(uid runtime.UID) (*api.Motorcycle, error) { +- log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk) +- object, err := c.storage.Get(c.gvk, uid) +- if err != nil { +- return nil, err +- } +- +- return object.(*api.Motorcycle), nil +-} +- +-// Set saves the given Motorcycle into the persistent storage +-func (c *motorcycleClient) Set(motorcycle *api.Motorcycle) error { +- log.Tracef("Client.Set; UID: %q, GVK: %v", motorcycle.GetUID(), c.gvk) +- return c.storage.Set(c.gvk, motorcycle) +-} +- +-// Patch performs a strategic merge patch on the object with +-// the given UID, using the byte-encoded patch given +-func (c *motorcycleClient) Patch(uid runtime.UID, patch []byte) error { +- return c.storage.Patch(c.gvk, uid, patch) +-} +- +-// Delete deletes the Motorcycle from the storage +-func (c *motorcycleClient) Delete(uid runtime.UID) error { +- log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk) +- return c.storage.Delete(c.gvk, uid) +-} +- +-// List returns a list of all Motorcycles available +-func (c *motorcycleClient) List() ([]*api.Motorcycle, error) { +- log.Tracef("Client.List; GVK: %v", c.gvk) +- list, err := c.storage.List(c.gvk) +- if err != nil { +- return nil, err +- } +- +- results := make([]*api.Motorcycle, 0, len(list)) +- for _, item := range list { +- results = append(results, item.(*api.Motorcycle)) +- } +- +- return results, nil +-} +diff --git a/cmd/sample-app/main.go b/cmd/sample-app/main.go +index ea119a9..2812acc 100644 +--- a/cmd/sample-app/main.go ++++ 
b/cmd/sample-app/main.go +@@ -2,7 +2,10 @@ package main + + import ( + "bytes" ++ "context" ++ "encoding/json" + "fmt" ++ "io/ioutil" + "net/http" + "os" + +@@ -10,12 +13,19 @@ import ( + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" + "github.com/weaveworks/libgitops/cmd/common" ++ "github.com/weaveworks/libgitops/cmd/common/logs" + "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" + "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" +- "github.com/weaveworks/libgitops/pkg/logs" +- "github.com/weaveworks/libgitops/pkg/runtime" + "github.com/weaveworks/libgitops/pkg/serializer" +- "github.com/weaveworks/libgitops/pkg/storage" ++ "github.com/weaveworks/libgitops/pkg/storage/backend" ++ "github.com/weaveworks/libgitops/pkg/storage/client" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ "github.com/weaveworks/libgitops/pkg/storage/filesystem" ++ "github.com/weaveworks/libgitops/pkg/storage/kube" ++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ++ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ++ "k8s.io/apimachinery/pkg/types" ++ ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" + ) + + var manifestDirFlag = pflag.String("data-dir", "/tmp/libgitops/manifest", "Where to store the YAML files") +@@ -25,27 +35,43 @@ func main() { + common.ParseVersionFlag() + + // Run the application +- if err := run(); err != nil { ++ if err := run(*manifestDirFlag); err != nil { + fmt.Println(err) + os.Exit(1) + } + } + +-func run() error { ++func run(manifestDir string) error { ++ ctx := context.Background() + // Create the manifest directory +- if err := os.MkdirAll(*manifestDirFlag, 0755); err != nil { ++ if err := os.MkdirAll(manifestDir, 0755); err != nil { + return err + } + + // Set the log level + logs.Logger.SetLevel(logrus.InfoLevel) + +- plainStorage := storage.NewGenericStorage( +- storage.NewGenericRawStorage(*manifestDirFlag, v1alpha1.SchemeGroupVersion, serializer.ContentTypeYAML), +- scheme.Serializer, +- 
[]runtime.IdentifierFactory{runtime.Metav1NameIdentifier}, ++ s, err := filesystem.NewSimpleStorage( ++ manifestDir, ++ core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, ++ filesystem.SimpleFileFinderOptions{ ++ DisableGroupDirectory: true, ++ ContentType: serializer.ContentTypeYAML, ++ }, + ) +- defer func() { _ = plainStorage.Close() }() ++ if err != nil { ++ return err ++ } ++ ++ b, err := backend.NewGeneric(s, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil) ++ if err != nil { ++ return err ++ } ++ ++ plainClient, err := client.NewGeneric(b, scheme.Serializer.Patcher()) ++ if err != nil { ++ return err ++ } + + e := common.NewEcho() + +@@ -55,7 +81,8 @@ func run() error { + return echo.NewHTTPError(http.StatusBadRequest, "Please set name") + } + +- obj, err := plainStorage.Get(common.CarKeyForName(name)) ++ obj := &v1alpha1.Car{} ++ err := plainClient.Get(ctx, core.ObjectKey{Name: name}, obj) + if err != nil { + return err + } +@@ -66,13 +93,92 @@ func run() error { + return c.JSONBlob(http.StatusOK, content.Bytes()) + }) + ++ e.GET("/meta/", func(c echo.Context) error { ++ list := &metav1.PartialObjectMetadataList{} ++ list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList")) ++ err := plainClient.List(ctx, list) ++ if err != nil { ++ return err ++ } ++ var content bytes.Buffer ++ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil { ++ return err ++ } ++ return c.JSONBlob(http.StatusOK, content.Bytes()) ++ }) ++ ++ e.GET("/meta/:name", func(c echo.Context) error { ++ name := c.Param("name") ++ if len(name) == 0 { ++ return echo.NewHTTPError(http.StatusBadRequest, "Please set name") ++ } ++ ++ obj := &metav1.PartialObjectMetadata{} ++ obj.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("Car")) ++ err := plainClient.Get(ctx, core.ObjectKey{ ++ Name: name, ++ }, obj) ++ if err != nil { ++ return err ++ } ++ var content bytes.Buffer ++ if err := 
scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), obj); err != nil { ++ return err ++ } ++ return c.JSONBlob(http.StatusOK, content.Bytes()) ++ }) ++ ++ e.GET("/unstructured/", func(c echo.Context) error { ++ list := &unstructured.UnstructuredList{} ++ list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList")) ++ err := plainClient.List(ctx, list) ++ if err != nil { ++ return err ++ } ++ var content bytes.Buffer ++ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil { ++ return err ++ } ++ var newcontent bytes.Buffer ++ if err := json.Indent(&newcontent, content.Bytes(), "", " "); err != nil { ++ return err ++ } ++ return c.JSONBlob(http.StatusOK, newcontent.Bytes()) ++ }) ++ ++ e.GET("/unstructured/:name", func(c echo.Context) error { ++ name := c.Param("name") ++ if len(name) == 0 { ++ return echo.NewHTTPError(http.StatusBadRequest, "Please set name") ++ } ++ ++ obj := &unstructured.Unstructured{} ++ obj.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("Car")) ++ err := plainClient.Get(ctx, core.ObjectKey{ ++ Name: name, ++ }, obj) ++ if err != nil { ++ return err ++ } ++ var content bytes.Buffer ++ // This does for some reason not pretty-encode the output ++ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), obj); err != nil { ++ return err ++ } ++ var newcontent bytes.Buffer ++ if err := json.Indent(&newcontent, content.Bytes(), "", " "); err != nil { ++ return err ++ } ++ return c.JSONBlob(http.StatusOK, newcontent.Bytes()) ++ }) ++ + e.POST("/plain/:name", func(c echo.Context) error { + name := c.Param("name") + if len(name) == 0 { + return echo.NewHTTPError(http.StatusBadRequest, "Please set name") + } + +- if err := plainStorage.Create(common.NewCar(name)); err != nil { ++ if err := plainClient.Create(ctx, common.NewCar(name)); err != nil { + return err + } + return c.String(200, "OK!") +@@ -84,11 +190,45 @@ func run() 
error { + return echo.NewHTTPError(http.StatusBadRequest, "Please set name") + } + +- if err := common.SetNewCarStatus(plainStorage, common.CarKeyForName(name)); err != nil { ++ if err := common.SetNewCarStatus(ctx, plainClient, name); err != nil { + return err + } + return c.String(200, "OK!") + }) + ++ e.PATCH("/plain/:name", func(c echo.Context) error { ++ name := c.Param("name") ++ if len(name) == 0 { ++ return echo.NewHTTPError(http.StatusBadRequest, "Please set name") ++ } ++ ++ body, err := ioutil.ReadAll(c.Request().Body) ++ if err != nil { ++ return err ++ } ++ c.Request().Body.Close() ++ ++ car := &v1alpha1.Car{} ++ err = plainClient.Get(ctx, core.ObjectKey{ ++ Name: name, ++ }, car) ++ if err != nil { ++ return err ++ } ++ ++ if err := plainClient.Patch(ctx, car, ctrlclient.RawPatch(types.MergePatchType, body)); err != nil { ++ return err ++ } ++ ++ return c.JSON(200, car) ++ }) ++ + return common.StartEcho(e) + } ++ ++/* ++type noNamespacesRESTMapper struct{} ++ ++func (noNamespacesRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { ++ return &meta.RESTMapping{Scope: meta.RESTScopeRoot}, nil ++}*/ +diff --git a/cmd/sample-gitops/main.go b/cmd/sample-gitops/main.go +index e8c2180..d18a9d5 100644 +--- a/cmd/sample-gitops/main.go ++++ b/cmd/sample-gitops/main.go +@@ -1,9 +1,11 @@ + package main + + import ( ++ "bytes" + "context" + "fmt" + "io/ioutil" ++ "math/rand" + "net/http" + "os" + "time" +@@ -15,14 +17,22 @@ import ( + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" + "github.com/weaveworks/libgitops/cmd/common" ++ "github.com/weaveworks/libgitops/cmd/common/logs" + "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" +- "github.com/weaveworks/libgitops/pkg/gitdir" +- "github.com/weaveworks/libgitops/pkg/logs" +- "github.com/weaveworks/libgitops/pkg/storage" +- "github.com/weaveworks/libgitops/pkg/storage/transaction" +- githubpr 
"github.com/weaveworks/libgitops/pkg/storage/transaction/pullrequest/github" +- "github.com/weaveworks/libgitops/pkg/storage/watch" +- "github.com/weaveworks/libgitops/pkg/storage/watch/update" ++ "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" ++ "github.com/weaveworks/libgitops/pkg/serializer" ++ "github.com/weaveworks/libgitops/pkg/storage/backend" ++ "github.com/weaveworks/libgitops/pkg/storage/client" ++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional" ++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed" ++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed/git" ++ githubpr "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed/git/github" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ "github.com/weaveworks/libgitops/pkg/storage/event" ++ "github.com/weaveworks/libgitops/pkg/storage/filesystem" ++ unstructuredevent "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/event" ++ "github.com/weaveworks/libgitops/pkg/storage/kube" ++ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + ) + + var ( +@@ -30,8 +40,9 @@ var ( + authorNameFlag = pflag.String("author-name", defaultAuthorName, "Author name for Git commits") + authorEmailFlag = pflag.String("author-email", defaultAuthorEmail, "Author email for Git commits") + gitURLFlag = pflag.String("git-url", "", "HTTPS Git URL; where the Git repository is, e.g. https://github.com/luxas/ignite-gitops") +- prAssigneeFlag = pflag.StringSlice("pr-assignees", nil, "What user logins to assign for the created PR. The user must have pull access to the repo.") + prMilestoneFlag = pflag.String("pr-milestone", "", "What milestone to tag the PR with") ++ prAssigneesFlag = pflag.StringSlice("pr-assignees", nil, "What user logins to assign for the created PR. 
The user must have pull access to the repo.") ++ prLabelsFlag = pflag.StringSlice("pr-labels", nil, "What labels to apply on the created PR. The labels must already exist. E.g. \"user/bot,actuator/libgitops,kind/status-update\"") + ) + + const ( +@@ -46,7 +57,16 @@ func main() { + common.ParseVersionFlag() + + // Run the application +- if err := run(*identityFlag, *gitURLFlag, os.Getenv("GITHUB_TOKEN"), *authorNameFlag, *authorEmailFlag); err != nil { ++ if err := run( ++ *identityFlag, ++ *gitURLFlag, ++ os.Getenv("GITHUB_TOKEN"), ++ *authorNameFlag, ++ *authorEmailFlag, ++ *prMilestoneFlag, ++ *prAssigneesFlag, ++ *prLabelsFlag, ++ ); err != nil { + fmt.Println(err) + os.Exit(1) + } +@@ -60,7 +80,8 @@ func expandAndRead(filePath string) ([]byte, error) { + return ioutil.ReadFile(expandedPath) + } + +-func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error { ++func run(identityFile, gitURL, ghToken, authorName, authorEmail, prMilestone string, ++ prAssignees, prLabels []string) error { + // Validate parameters + if len(identityFile) == 0 { + return fmt.Errorf("--identity-file is required") +@@ -69,7 +90,7 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error { + return fmt.Errorf("--git-url is required") + } + if len(ghToken) == 0 { +- return fmt.Errorf("--github-token is required") ++ return fmt.Errorf("GITHUB_TOKEN is required") + } + if len(authorName) == 0 { + return fmt.Errorf("--author-name is required") +@@ -78,6 +99,9 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error { + return fmt.Errorf("--author-email is required") + } + ++ // Set the log level ++ logs.Logger.SetLevel(logrus.TraceLevel) ++ + // Read the identity and known_hosts files + identityContent, err := expandAndRead(identityFile) + if err != nil { +@@ -101,58 +125,101 @@ func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error { + } + + // Authenticate to the GitDirectory using Git SSH +- 
authMethod, err := gitdir.NewSSHAuthMethod(identityContent, knownHostsContent) ++ authMethod, err := git.NewSSHAuthMethod(identityContent, knownHostsContent) + if err != nil { + return err + } + +- // Construct the GitDirectory implementation which backs the storage +- gitDir, err := gitdir.NewGitDirectory(repoRef, gitdir.GitDirectoryOptions{ ++ ctx, cancel := context.WithCancel(context.Background()) ++ ++ defer func() { cancel() }() ++ ++ // Construct the LocalClone implementation which backs the storage ++ localClone, err := git.NewLocalClone(ctx, repoRef, git.LocalCloneOptions{ + Branch: "master", +- Interval: 10 * time.Second, + AuthMethod: authMethod, + }) + if err != nil { + return err + } + +- // Create a new PR provider for the GitStorage +- prProvider, err := githubpr.NewGitHubPRProvider(ghClient) ++ rawManifest, err := unstructuredevent.NewManifest( ++ localClone.Dir(), ++ filesystem.DefaultContentTyper, ++ core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced ++ &core.SerializerObjectRecognizer{Serializer: scheme.Serializer}, ++ filesystem.DefaultPathExcluders(), ++ ) + if err != nil { + return err + } +- // Create a new GitStorage using the GitDirectory, PR provider, and Serializer +- gitStorage, err := transaction.NewGitStorage(gitDir, prProvider, scheme.Serializer) ++ ++ // Create the channel to receive events to, and register it with the EventStorage ++ updates := make(event.ObjectEventStream, 4096) ++ if err := rawManifest.WatchForObjectEvents(ctx, updates); err != nil { ++ return err ++ } ++ ++ defer func() { _ = rawManifest.Close() }() ++ ++ b, err := backend.NewGeneric(rawManifest, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil) + if err != nil { + return err + } + +- // Set the log level +- logs.Logger.SetLevel(logrus.InfoLevel) ++ gitClient, err := client.NewGeneric(b, scheme.Serializer.Patcher()) ++ if err != nil { ++ return err ++ } + +- watchStorage, err := watch.NewManifestStorage(gitDir.Dir(), 
scheme.Serializer) ++ txGeneralClient, err := transactional.NewGeneric(gitClient, localClone, nil) + if err != nil { + return err + } +- defer func() { _ = watchStorage.Close() }() + +- updates := make(chan update.Update, 4096) +- watchStorage.SetUpdateStream(updates) ++ txClient, err := distributed.NewClient(txGeneralClient, localClone) ++ if err != nil { ++ return err ++ } ++ ++ // Create a new CommitHook for sending PRs ++ prCommitHook, err := githubpr.NewGitHubPRCommitHandler(ghClient, localClone.RepositoryRef()) ++ if err != nil { ++ return err ++ } ++ ++ // Register the PR CommitHook with the BranchManager ++ // This needs to be done after the distributed.NewClient call, so ++ // it has been able to handle pushing of the branch first. ++ localClone.CommitHookChain().Register(prCommitHook) ++ ++ // Start the sync loop in the background ++ txClient.StartResyncLoop(ctx, 15*time.Second) + + go func() { + for upd := range updates { +- logrus.Infof("Got %s update for: %v %v", upd.Event, upd.PartialObject.GetObjectKind().GroupVersionKind(), upd.PartialObject.GetObjectMeta()) ++ logrus.Infof("Got %s update for: %v %v", upd.Type, upd.ID.GroupKind(), upd.ID.ObjectKey()) + } + }() + + e := common.NewEcho() + + e.GET("/git/", func(c echo.Context) error { +- objs, err := gitStorage.List(storage.NewKindKey(common.CarGVK)) +- if err != nil { ++ list := &unstructured.UnstructuredList{} ++ list.SetGroupVersionKind(v1alpha1.SchemeGroupVersion.WithKind("CarList")) ++ ++ /*if br := c.QueryParam("branch"); len(br) != 0 { ++ ctx = core.WithVersionRef(ctx, core.NewBranchRef(br)) ++ }*/ ++ ++ if err := txClient.List(ctx, list); err != nil { ++ return err ++ } ++ var content bytes.Buffer ++ if err := scheme.Serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&content), list); err != nil { + return err + } +- return c.JSON(http.StatusOK, objs) ++ return c.JSONBlob(http.StatusOK, content.Bytes()) + }) + + e.PUT("/git/:name", func(c echo.Context) error { +@@ -161,26 +228,36 @@ 
func run(identityFile, gitURL, ghToken, authorName, authorEmail string) error { + return echo.NewHTTPError(http.StatusBadRequest, "Please set name") + } + +- objKey := common.CarKeyForName(name) +- err := gitStorage.Transaction(context.Background(), fmt.Sprintf("%s-update-", name), func(ctx context.Context, s storage.Storage) (transaction.CommitResult, error) { ++ car := v1alpha1.Car{} ++ carKey := core.ObjectKey{Name: name} + +- // Update the status of the car +- if err := common.SetNewCarStatus(s, objKey); err != nil { +- return nil, err +- } ++ branchCtx := core.WithVersionRef(ctx, core.NewBranchRef(localClone.MainBranch())) + +- return &transaction.GenericPullRequestResult{ +- CommitResult: &transaction.GenericCommitResult{ +- AuthorName: authorName, +- AuthorEmail: authorEmail, +- Title: "Update Car speed", +- Description: "We really need to sync this state!", ++ headBranch := fmt.Sprintf("%s-update-", name) ++ err := txClient. ++ BranchTransaction(branchCtx, headBranch). ++ Get(carKey, &car). ++ Custom(func(ctx context.Context) error { ++ car.Status.Distance = rand.Uint64() ++ car.Status.Speed = rand.Float64() * 100 ++ return nil ++ }). ++ Update(&car). 
++ CreateTx(githubpr.GenericPullRequest{ ++ Commit: transactional.GenericCommit{ ++ Author: transactional.GenericCommitAuthor{ ++ Name: authorName, ++ Email: authorEmail, ++ }, ++ Message: transactional.GenericCommitMessage{ ++ Title: "Update Car speed", ++ Description: "We really need to sync this state!", ++ }, + }, +- Labels: []string{"user/bot", "actuator/libgitops", "kind/status-update"}, +- Assignees: *prAssigneeFlag, +- Milestone: *prMilestoneFlag, +- }, nil +- }) ++ Labels: prLabels, ++ Assignees: prAssignees, ++ Milestone: prMilestone, ++ }).Error() + if err != nil { + return err + } +diff --git a/cmd/sample-watch/main.go b/cmd/sample-watch/main.go +index ef1aec0..c81a279 100644 +--- a/cmd/sample-watch/main.go ++++ b/cmd/sample-watch/main.go +@@ -2,6 +2,7 @@ package main + + import ( + "bytes" ++ "context" + "fmt" + "net/http" + "os" +@@ -10,11 +11,17 @@ import ( + "github.com/sirupsen/logrus" + "github.com/spf13/pflag" + "github.com/weaveworks/libgitops/cmd/common" ++ "github.com/weaveworks/libgitops/cmd/common/logs" + "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/scheme" +- "github.com/weaveworks/libgitops/pkg/logs" ++ "github.com/weaveworks/libgitops/cmd/sample-app/apis/sample/v1alpha1" + "github.com/weaveworks/libgitops/pkg/serializer" +- "github.com/weaveworks/libgitops/pkg/storage/watch" +- "github.com/weaveworks/libgitops/pkg/storage/watch/update" ++ "github.com/weaveworks/libgitops/pkg/storage/backend" ++ "github.com/weaveworks/libgitops/pkg/storage/client" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ "github.com/weaveworks/libgitops/pkg/storage/event" ++ "github.com/weaveworks/libgitops/pkg/storage/filesystem" ++ unstructuredevent "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured/event" ++ "github.com/weaveworks/libgitops/pkg/storage/kube" + ) + + var watchDirFlag = pflag.String("watch-dir", "/tmp/libgitops/watch", "Where to watch for YAML/JSON manifests") +@@ -24,33 +31,55 @@ func main() { + 
common.ParseVersionFlag() + + // Run the application +- if err := run(); err != nil { ++ if err := run(*watchDirFlag); err != nil { + fmt.Println(err) + os.Exit(1) + } + } + +-func run() error { ++func run(watchDir string) error { + // Create the watch directory + if err := os.MkdirAll(*watchDirFlag, 0755); err != nil { + return err + } + + // Set the log level +- logs.Logger.SetLevel(logrus.InfoLevel) ++ logs.Logger.SetLevel(logrus.TraceLevel) + +- watchStorage, err := watch.NewManifestStorage(*watchDirFlag, scheme.Serializer) ++ ctx := context.Background() ++ ++ rawManifest, err := unstructuredevent.NewManifest( ++ watchDir, ++ filesystem.DefaultContentTyper, ++ core.StaticNamespacer{NamespacedIsDefaultPolicy: false}, // all objects root-spaced ++ &core.SerializerObjectRecognizer{Serializer: scheme.Serializer}, ++ filesystem.DefaultPathExcluders(), ++ ) ++ if err != nil { ++ return err ++ } ++ ++ // Create the channel to receive events to, and register it with the EventStorage ++ updates := make(event.ObjectEventStream, 4096) ++ if err := rawManifest.WatchForObjectEvents(ctx, updates); err != nil { ++ return err ++ } ++ ++ b, err := backend.NewGeneric(rawManifest, scheme.Serializer, kube.NewNamespaceEnforcer(), nil, nil) ++ if err != nil { ++ return err ++ } ++ ++ watchStorage, err := client.NewGeneric(b, scheme.Serializer.Patcher()) + if err != nil { + return err + } +- defer func() { _ = watchStorage.Close() }() + +- updates := make(chan update.Update, 4096) +- watchStorage.SetUpdateStream(updates) ++ defer func() { _ = rawManifest.Close() }() + + go func() { + for upd := range updates { +- logrus.Infof("Got %s update for: %v %v", upd.Event, upd.PartialObject.GetObjectKind().GroupVersionKind(), upd.PartialObject.GetObjectMeta()) ++ logrus.Infof("Got %s update for: %v %v", upd.Type, upd.ID.GroupKind(), upd.ID.ObjectKey()) + } + }() + +@@ -62,7 +91,8 @@ func run() error { + return echo.NewHTTPError(http.StatusBadRequest, "Please set name") + } + +- obj, err := 
watchStorage.Get(common.CarKeyForName(name)) ++ obj := &v1alpha1.Car{} ++ err := watchStorage.Get(ctx, core.ObjectKey{Name: name}, obj) + if err != nil { + return err + } +@@ -79,7 +109,7 @@ func run() error { + return echo.NewHTTPError(http.StatusBadRequest, "Please set name") + } + +- if err := common.SetNewCarStatus(watchStorage, common.CarKeyForName(name)); err != nil { ++ if err := common.SetNewCarStatus(ctx, watchStorage, name); err != nil { + return err + } + return c.String(200, "OK!") +diff --git a/go.mod b/go.mod +index c03013f..499f482 100644 +--- a/go.mod ++++ b/go.mod +@@ -1,31 +1,30 @@ + module github.com/weaveworks/libgitops + +-go 1.14 ++go 1.15 + +-replace ( +- github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible +- github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.3.0 +-) ++replace github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible + + require ( +- github.com/fluxcd/go-git-providers v0.0.2 +- github.com/fluxcd/toolkit v0.0.1-beta.2 +- github.com/go-git/go-git/v5 v5.1.0 +- github.com/go-openapi/spec v0.19.8 ++ github.com/evanphx/json-patch v4.9.0+incompatible ++ github.com/fluxcd/go-git-providers v0.0.3 ++ github.com/fluxcd/pkg/ssh v0.0.5 ++ github.com/go-git/go-git/v5 v5.2.0 ++ github.com/go-openapi/spec v0.20.0 + github.com/google/go-github/v32 v32.1.0 + github.com/labstack/echo v3.3.10+incompatible + github.com/labstack/gommon v0.3.0 // indirect + github.com/mattn/go-isatty v0.0.12 // indirect + github.com/mitchellh/go-homedir v1.1.0 + github.com/rjeczalik/notify v0.9.2 +- github.com/sirupsen/logrus v1.6.0 ++ github.com/sirupsen/logrus v1.7.0 ++ github.com/spf13/afero v1.2.2 + github.com/spf13/pflag v1.0.5 + github.com/stretchr/testify v1.6.1 +- golang.org/x/net v0.0.0-20200625001655-4c5254603344 // indirect +- golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d +- k8s.io/apimachinery v0.18.6 +- k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 +- 
sigs.k8s.io/controller-runtime v0.6.0 +- sigs.k8s.io/kustomize/kyaml v0.1.11 +- sigs.k8s.io/yaml v1.2.0 ++ golang.org/x/sys v0.0.0-20210108172913-0df2131ae363 ++ k8s.io/api v0.19.2 ++ k8s.io/apimachinery v0.19.6 ++ k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 ++ k8s.io/utils v0.0.0-20200912215256-4140de9c8800 ++ sigs.k8s.io/controller-runtime v0.7.0 ++ sigs.k8s.io/kustomize/kyaml v0.10.5 + ) +diff --git a/go.sum b/go.sum +index c1ecf37..b401269 100644 +--- a/go.sum ++++ b/go.sum +@@ -1,28 +1,34 @@ +-bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= + cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= + cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= + cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= ++cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= ++cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= ++cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= ++cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= ++cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= ++cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= ++cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= ++cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= ++cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= ++dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= + github.com/360EntSecGroup-Skylar/excelize v1.4.1/go.mod h1:vnax29X2usfl7HHkBrX5EvSCJcmH3dT9luvxzu8iGAE= + github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod 
h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= + github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= ++github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= + github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= ++github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= + github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= ++github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= + github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= + github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= ++github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= + github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= + github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= ++github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= + github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= + github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +-github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +-github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +-github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +-github.com/Masterminds/sprig/v3 v3.0.2/go.mod h1:oesJ8kPONMONaZgtiHNzUShJbksypC5kWczhZAf6+aU= +-github.com/Masterminds/vcs v1.13.1/go.mod 
h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= +-github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +-github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= + github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= + github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +-github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= + github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg= + github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs= + github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +@@ -33,12 +39,13 @@ github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2 h1:JCHLVE3B+kJd + github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= + github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +-github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= + github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= + github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= + github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= ++github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= ++github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= + github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= + github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= + github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= +@@ -48,72 +55,46 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= + github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +-github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= + github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +-github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= ++github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= ++github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= + github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +-github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= + github.com/blang/semver v3.5.0+incompatible/go.mod 
h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +-github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +-github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +-github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM= + github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= + github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= + github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +-github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= ++github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= ++github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= + github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= + github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= + github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= + github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +-github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +-github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +-github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +-github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +-github.com/containerd/continuity 
v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +-github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= +-github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +-github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +-github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +-github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= + github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= + github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= + github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= + github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= + github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= + github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= + github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= + github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +-github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= + github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= + github.com/cpuguy83/go-md2man/v2 
v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= + github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= + github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +-github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +-github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= + github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= + github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +-github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= +-github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As= + github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= + github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +-github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +-github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +-github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +-github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +-github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +-github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= + github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= + github.com/docker/go-units v0.4.0/go.mod 
h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= + github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= ++github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= + github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= + github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= + github.com/dustmop/soup v1.1.2-0.20190516214245-38228baa104e/go.mod h1:CgNC6SGbT+Xb8wGGvzilttZL1mc5sQ/5KkcxsZttMIk= +-github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= + github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= + github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw= + github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +@@ -123,18 +104,15 @@ github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg + github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= + github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= + github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +-github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= + github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +-github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= 
+-github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= ++github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= ++github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= + github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +-github.com/fluxcd/go-git-providers v0.0.2 h1:NGJeJl1TOJKbxtQkRL9JOk5lIopR1XNi6hGgZC5+8IE= +-github.com/fluxcd/go-git-providers v0.0.2/go.mod h1:2Fp9GDxIcllNR7pm5clXhInPyue4VggecaH83KhkpNw= +-github.com/fluxcd/kustomize-controller v0.0.1-beta.2/go.mod h1:mLeipvpQkyof6b5IHNtqeA8CmbjfVIf92UyKkpeBY98= +-github.com/fluxcd/source-controller v0.0.1-beta.2/go.mod h1:tmscNdCxEt7+Xt2g1+bI38hMPw2leYMFAaCn4UlMGuw= +-github.com/fluxcd/toolkit v0.0.1-beta.2 h1:JG80AUIGd936QJ6Vs/xZweoKcE6j7Loua5Wn6Q/pVh8= +-github.com/fluxcd/toolkit v0.0.1-beta.2/go.mod h1:NqDXj2aeVMbVkrCHeP/r0um+edXXyeGlG/9pKZLqGdM= ++github.com/fluxcd/go-git-providers v0.0.3 h1:pquQvTpd1a4V1efPyZWuVPeIKrTgV8QRoDY0VGH+qiw= ++github.com/fluxcd/go-git-providers v0.0.3/go.mod h1:iaXf3nEq8MB/LzxfbNcCl48sAtIReUU7jqjJ7CEnfFQ= ++github.com/fluxcd/pkg/ssh v0.0.5 h1:rnbFZ7voy2JBlUfMbfyqArX2FYaLNpDhccGFC3qW83A= ++github.com/fluxcd/pkg/ssh v0.0.5/go.mod h1:7jXPdXZpc0ttMNz2kD9QuMi3RNn/e0DOFbj0Tij/+Hs= + github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= + github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= + github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +@@ -147,27 +125,28 @@ github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= + github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= + github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= + github.com/globalsign/mgo 
v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +-github.com/go-critic/go-critic v0.3.5-0.20190904082202-d79a9f0c64db/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= + github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= + github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= + github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= + github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= + github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= + github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +-github.com/go-git/go-git-fixtures/v4 v4.0.1 h1:q+IFMfLx200Q3scvt2hN79JsEzy4AmBTp/pqnefH+Bc= +-github.com/go-git/go-git-fixtures/v4 v4.0.1/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +-github.com/go-git/go-git/v5 v5.0.0/go.mod h1:oYD8y9kWsGINPFJoLdaScGCN6dlKg23blmClfZwtUVA= +-github.com/go-git/go-git/v5 v5.1.0 h1:HxJn9g/E7eYvKW3Fm7Jt4ee8LXfPOm/H1cdDu8vEssk= +-github.com/go-git/go-git/v5 v5.1.0/go.mod h1:ZKfuPUoY1ZqIG4QG9BDBh3G4gLM5zvPuSJAozQrZuyM= ++github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= ++github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= ++github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI= ++github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs= ++github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= + github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +-github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= ++github.com/go-kit/kit v0.9.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= + github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= + github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= + github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= + github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +-github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= +-github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +-github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= ++github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= ++github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= ++github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= ++github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= ++github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= + github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= + github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= + github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +@@ -184,6 +163,8 @@ github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9 + github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= + github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= + github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= ++github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= ++github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= + 
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9 h1:tF+augKRWlWx0J0B7ZyyKSiTyV6E1zZe+7b3qQlcEf8= + github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= + github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +@@ -192,6 +173,8 @@ github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq + github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= + github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= + github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= ++github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= ++github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= + github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= + github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= + github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +@@ -209,8 +192,8 @@ github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wab + github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= + github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw= + github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= +-github.com/go-openapi/spec v0.19.8 h1:qAdZLh1r6QF/hI/gTq+TJTvsQUodZsM7KLqkAJdiJNg= +-github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= ++github.com/go-openapi/spec v0.20.0 h1:HGLc8AJ7ynOxwv0Lq4TsnwLsWMawHAYiJIFzbcML86I= ++github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= + github.com/go-openapi/strfmt v0.17.0/go.mod 
h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= + github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= + github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +@@ -224,71 +207,42 @@ github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88d + github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= + github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= + github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= ++github.com/go-openapi/swag v0.19.12 h1:Bc0bnY2c3AoF7Gc+IMIAQQsD8fLHjHpc19wXvYuayQI= ++github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= + github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= + github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= + github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= + github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= + github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +-github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +-github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +-github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +-github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +-github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= +-github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +-github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod 
h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +-github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= +-github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +-github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= +-github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +-github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +-github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +-github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +-github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +-github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +-github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= ++github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= + github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= + github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +-github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= + github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= + github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= + github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +-github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + 
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= ++github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= ++github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= ++github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= + github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= + github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +-github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= ++github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= + github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= + github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= + github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= + github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= + github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= + github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= ++github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= + github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= + github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= + github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= + github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= + github.com/golang/protobuf v1.4.0/go.mod 
h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= ++github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= + github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= + github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +-github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +-github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +-github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= +-github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +-github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= +-github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +-github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +-github.com/golangci/golangci-lint v1.21.0/go.mod h1:phxpHK52q7SE+5KpPnti4oZTdFCEsn/tKN+nFvCKXfk= +-github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= +-github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +-github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +-github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +-github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +-github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= 
+-github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +-github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAONeMXrxql8uvOKuAZSu8aM5RUGv+1C6IJaEho= +-github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= +-github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= + github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= + github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= + github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +@@ -297,32 +251,31 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw + github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= + github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= + github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +-github.com/google/go-github/v32 v32.0.0 h1:q74KVb22spUq0U5HqZ9VCYqQz8YRuOtL/39ZnfwO+NM= +-github.com/google/go-github/v32 v32.0.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= ++github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= ++github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= + github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= + github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= + github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= + github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +-github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= + 
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= + github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= + github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= + github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= + github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= ++github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= ++github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= + github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +-github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= + github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= + github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= + github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +-github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0= +-github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +-github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= ++github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= ++github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= ++github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= ++github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= 
+ github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= + github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +-github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +-github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +@@ -331,38 +284,36 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= + github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= + github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +-github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= + github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= + github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +-github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= + github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +-github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +-github.com/hashicorp/go-version 
v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= + github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= + github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= + github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= ++github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= ++github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= + github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= + github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= + github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +-github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= ++github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= + github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +-github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +-github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= + github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= + github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= ++github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= ++github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= + github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= + github.com/jessevdk/go-flags v1.4.0/go.mod 
h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= + github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +-github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= ++github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= ++github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= + github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= + github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +-github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= ++github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= ++github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= + github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +-github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= ++github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= + github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= + github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= + github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= +@@ -371,10 +322,6 @@ github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT + github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= + github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= + github.com/kisielk/gotool v1.0.0/go.mod 
h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +-github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +-github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +-github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +-github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= + github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= + github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= + github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +@@ -382,9 +329,9 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv + github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= + github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= + github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= ++github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= + github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= + github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +-github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= + github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= + github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= + github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +@@ -394,11 +341,6 @@ github.com/labstack/echo v3.3.10+incompatible h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8 + github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s= + github.com/labstack/gommon v0.3.0 
h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= + github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +-github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +-github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +-github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +-github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= + github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= + github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a h1:TpvdAwDAt1K4ANVOfcihouRdvP+MgAfDWwBuct4l6ZY= + github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +@@ -409,13 +351,12 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN + github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= + github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= + github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +-github.com/manifoldco/promptui v0.7.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ= +-github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= ++github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= ++github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= ++github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= + github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= + 
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= + github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +-github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= +-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= + github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= + github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= + github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +@@ -425,35 +366,27 @@ github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2y + github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= + github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= + github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +-github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +-github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= + github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= + github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +-github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= ++github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= ++github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= + github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= + 
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +-github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +-github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= + github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= + github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +-github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= ++github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +-github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= + github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= + github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= + github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +-github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +-github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= 
++github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= + github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= + github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +-github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= + github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= + github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +@@ -463,44 +396,37 @@ github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:v + github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= + github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= + github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +-github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= + github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= + github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= + github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= + github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= + github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= ++github.com/onsi/ginkgo v1.14.1 
h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= ++github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= + github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= + github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= + github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +-github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34= +-github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= + github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= + github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +-github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +-github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +-github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +-github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +-github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= ++github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= ++github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= + github.com/paulmach/orb v0.1.3/go.mod h1:VFlX/8C+IQ1p6FTRRKzKoOPJnvEtA5G0Veuqwbu//Vk= + github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= + github.com/pelletier/go-toml v1.2.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= + github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +-github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= + github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +-github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= + github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= + github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= + github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +-github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= + github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= + github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= + github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= + github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= + github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= ++github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= ++github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= + github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= + 
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +@@ -509,155 +435,150 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T + github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= + github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= + github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= ++github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= ++github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= + github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= + github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= + github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +-github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= ++github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= ++github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= + github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= + github.com/qri-io/starlib v0.4.2-0.20200213133954-ff2e8cd5ef8d/go.mod h1:7DPO4domFU579Ga6E61sB9VFNaniPVwJP5C4bBCu3wA= +-github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +-github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= + github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8= + github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM= + github.com/rogpeppe/fastuuid 
v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= + github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= + github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +-github.com/securego/gosec v0.0.0-20191002120514-e680875ea14d/go.mod h1:w5+eXa0mYznDkHaMCXA4XYffjlH+cy1oyKbfzJXa2Do= + github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= + github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= + github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +-github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= +-github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +-github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +-github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= + github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +-github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= + github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= + github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= + github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= + github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= + github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= 
++github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= ++github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= + github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +-github.com/sosedoff/gitkit v0.2.1-0.20191202022816-7182d43c6254/go.mod h1:A+o6ZazfVJwetlcHz3ah6th66XcBdsyzLo+aBt/AsK4= +-github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= + github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= + github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= ++github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= + github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= + github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +-github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= + github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= + github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= + github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= + github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +-github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= + github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= + github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= + github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= + github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= 
+-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= + github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= ++github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= + github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= + github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= + github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +-github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.2.3-0.20181224173747-660f15d67dbb/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= + github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= + github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= + github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= + github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= ++github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= + github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= + github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +-github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= + github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +-github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= + github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod 
h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= + github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= + github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +-github.com/ulikunitz/xz v0.5.5/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +-github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +-github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= +-github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= + github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +-github.com/uudashr/gocognit v0.0.0-20190926065955-1655d0de0517/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= + github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= + github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +-github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= + github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= + github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +-github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +-github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= + github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= + github.com/xanzy/go-gitlab v0.33.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= + github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= + github.com/xanzy/ssh-agent v0.2.1/go.mod 
h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +-github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +-github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +-github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8= + github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= + github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +-github.com/yujunz/go-getter v1.4.1-lite/go.mod h1:sbmqxXjyLunH1PkF3n7zSlnVeMvmYUuIl9ZVs/7NyCc= ++github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= + go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= + go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= ++go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= ++go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= + go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= + go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= + go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= + go.opencensus.io v0.21.0/go.mod 
h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= + go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= ++go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= + go.starlark.net v0.0.0-20190528202925-30ae18b8564f/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg= + go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= + go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= + go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= + go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= ++go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= ++go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= ++go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= ++go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= + go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= + go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= ++go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= ++go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= ++go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= ++go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= ++go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= + go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= + go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +-golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= ++go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= ++go.uber.org/zap v1.15.0/go.mod 
h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= + golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +-golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= + golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= + golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= + golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= ++golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= + golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= + golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= + golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +-golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +-golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +-golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +-golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +-golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= ++golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= ++golang.org/x/crypto 
v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= + golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= + golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= + golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +-golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +-golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= ++golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= ++golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= ++golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= ++golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= + golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= ++golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= + golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= + golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= + golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= + golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= ++golang.org/x/lint 
v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= ++golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= ++golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= ++golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= ++golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= + golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= ++golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= + golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +-golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= ++golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= ++golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= ++golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= ++golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= ++golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= + golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +-golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= + golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +@@ -669,26 +590,32 @@ golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn + golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= + golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= + golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= ++golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= + golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= ++golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= + golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= + golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +-golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= ++golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= + 
golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= + golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= ++golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= + golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +-golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4= +-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= ++golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= ++golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= ++golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= + golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= + golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= + golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= + golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= + golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= ++golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= ++golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +@@ -696,7 +623,6 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ + golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= + golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +-golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +@@ -704,9 +630,6 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h + golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +-golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +-golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= + golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +@@ -716,88 +639,116 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w + golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= + golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= + golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+-golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d h1:QQrM/CCYEzTs91GZylDCQjGHudbPTxF/1fvXdVh5lMo= +-golang.org/x/sys v0.0.0-20200812155832-6a926be9bd1d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +-golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= ++golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= ++golang.org/x/sys 
v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= ++golang.org/x/sys v0.0.0-20210108172913-0df2131ae363 h1:wHn06sgWHMO1VsQ8F+KzDJx/JzqfsNLnc+oEi07qD7s= ++golang.org/x/sys v0.0.0-20210108172913-0df2131ae363/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= + golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= + golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= + golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= ++golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= ++golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= ++golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= ++golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= + golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= + golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= + golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= + golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= ++golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= + golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= ++golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= ++golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= + golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +-golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= + golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= + golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= + golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +-golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +-golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= + golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= + golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +-golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= + golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= + golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +-golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= + golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= + golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +-golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +-golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= ++golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= ++golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= + golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= ++golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= + golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88= + golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= + golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= + golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +-golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +-golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +-golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +-golang.org/x/tools v0.0.0-20190930201159-7c411dea38b0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +-golang.org/x/tools v0.0.0-20191010075000-0337d82405ff h1:XdBG6es/oFDr1HwaxkxgVve7NB281QhxgK/i4voubFs= +-golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= ++golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= ++golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= ++golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= ++golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8= ++golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= ++golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +-gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= +-gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +-gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +-gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= ++gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= ++gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= + google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= ++google.golang.org/api v0.7.0/go.mod 
h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= ++google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= ++google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= ++google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= + google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= + google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= + google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= + google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= + google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= + google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= ++google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= ++google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= ++google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= ++google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= + google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= + google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= + google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= + google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= + google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= ++google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= + google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod 
h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= ++google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= ++google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= ++google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= + google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= + google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= + google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= ++google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= + google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +-google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= + google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= + google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= + google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +@@ -805,9 +756,12 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ + google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= + google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= + google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= ++google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= + google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= + google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +-gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod 
h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= ++google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= ++google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= ++google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= + gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= + gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +@@ -820,7 +774,6 @@ gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS + gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= + gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= + gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +-gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= + gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= + gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= + gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +@@ -835,93 +788,58 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= + gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= ++gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= + gopkg.in/yaml.v2 v2.2.8/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= + gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= + gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +-gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2 h1:XZx7nhd5GMaZpmDaEHFVafUZC7ya0fuo7cSJ3UCKYmM= +-gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= ++gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= ++gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= + gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= + gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= ++gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= ++gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= + gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +-helm.sh/helm/v3 v3.1.2/go.mod h1:WYsFJuMASa/4XUqLyv54s0U/f3mlAaRErGmyy4z921g= ++gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= + honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= + honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= ++honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= + honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= ++honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= + honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +-k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI= +-k8s.io/api v0.17.2/go.mod 
h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4= +-k8s.io/api v0.18.2 h1:wG5g5ZmSVgm5B+eHMIbI9EGATS2L8Z72rda19RIEgY8= +-k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= +-k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs= +-k8s.io/apiextensions-apiserver v0.18.2 h1:I4v3/jAuQC+89L3Z7dDgAiN4EOjN6sbm6iBqQwHTah8= +-k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= +-k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +-k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg= +-k8s.io/apimachinery v0.18.2 h1:44CmtbmkzVDAhCpRVSiP2R5PPrC2RtlIv/MoB8xpdRA= +-k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= +-k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= +-k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +-k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo= +-k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= +-k8s.io/cli-runtime v0.17.2/go.mod h1:aa8t9ziyQdbkuizkNLAw3qe3srSyWh9zlSB7zTqRNPI= +-k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k= +-k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI= +-k8s.io/client-go v0.18.2 h1:aLB0iaD4nmwh7arT2wIn+lMnAq7OswjaejkQ8p9bBYE= +-k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= +-k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= +-k8s.io/code-generator v0.18.2 h1:C1Nn2JiMf244CvBDKVPX0W2mZFJkVBg54T8OV7/Imso= +-k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= +-k8s.io/component-base v0.17.2/go.mod h1:zMPW3g5aH7cHJpKYQ/ZsGMcgbsA/VyhEugF3QT1awLs= +-k8s.io/component-base v0.18.2 h1:SJweNZAGcUvsypLGNPNGeJ9UgPZQ6+bW+gEHe8uyh/Y= +-k8s.io/component-base v0.18.2/go.mod 
h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= +-k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +-k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +-k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4= +-k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +-k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +-k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +-k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +-k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +-k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= +-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM= +-k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +-k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY= +-k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +-k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk= +-k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +-k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw= +-k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +-k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU= +-k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +-modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +-modernc.org/golex v1.0.0/go.mod 
h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +-modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +-modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +-modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +-mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +-mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +-mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= +-rsc.io/letsencrypt v0.0.3/go.mod h1:buyQKZ6IXrRnB7TdkHP0RyEybLx18HHyOSoTyoOLqNY= +-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +-sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8= +-sigs.k8s.io/controller-runtime v0.6.0 h1:Fzna3DY7c4BIP6KwfSlrfnj20DJ+SeMBK8HSFvOk9NM= +-sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= +-sigs.k8s.io/kustomize v2.0.3+incompatible h1:JUufWFNlI44MdtnjUqVnvh29rR37PQFzPbLXqhyOyX0= +-sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= +-sigs.k8s.io/kustomize/api v0.4.1/go.mod h1:NqxqT+wbYHrD0P19Uu4dXiMsVwI1IwQs+MJHlLhmPqQ= +-sigs.k8s.io/kustomize/kyaml v0.1.11 h1:/VvWxVIgH5gG1K4A7trgbyLgO3tRBiAWNhLFVU1HEmo= +-sigs.k8s.io/kustomize/kyaml v0.1.11/go.mod h1:72/rLkSi+L/pHM1oCjwrf3ClU+tH5kZQvvdLSqIHwWU= +-sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +-sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU= +-sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= +-sigs.k8s.io/structured-merge-diff/v3 
v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +-sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= +-sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= ++k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= ++k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= ++k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA= ++k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= ++k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= ++k8s.io/apimachinery v0.19.6 h1:kBLzSGuDdY1NdSV2uFzI+FwZ9wtkmG+X3ZVcWXSqNgA= ++k8s.io/apimachinery v0.19.6/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= ++k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= ++k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc= ++k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= ++k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= ++k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs= ++k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= ++k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= ++k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= ++k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= ++k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= ++k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= ++k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= ++k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod 
h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= ++k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= ++k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= ++k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= ++rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= ++sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= ++sigs.k8s.io/controller-runtime v0.7.0 h1:bU20IBBEPccWz5+zXpLnpVsgBYxqclaHu1pVDl/gEt8= ++sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= ++sigs.k8s.io/kustomize/kyaml v0.10.5 h1:PbJcsZsEM7O3hHtUWTR+4WkHVbQRW9crSy75or1gRbI= ++sigs.k8s.io/kustomize/kyaml v0.10.5/go.mod h1:P6Oy/ah/GZMKzJMIJA2a3/bc8YrBkuL5kJji13PSIzY= ++sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= ++sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= + sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= + sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= + sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= + sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +-sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +-vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= +diff --git a/hack/generate-client.sh b/hack/generate-client.sh +deleted file mode 100755 +index b7e5853..0000000 +--- a/hack/generate-client.sh ++++ /dev/null +@@ -1,16 +0,0 @@ +-#!/bin/bash +- +-SCRIPT_DIR=$( dirname "${BASH_SOURCE[0]}" ) +-cd ${SCRIPT_DIR}/.. 
+- +-RESOURCES="Car Motorcycle" +-CLIENT_NAME=SampleInternal +-OUT_DIR=cmd/sample-app/client +-API_DIR="github.com/weaveworks/libgitops/cmd/sample-app/apis/sample" +-mkdir -p ${OUT_DIR} +-for Resource in ${RESOURCES}; do +- resource=$(echo "${Resource}" | awk '{print tolower($0)}') +- sed -e "s|Resource|${Resource}|g;s|resource|${resource}|g;/build ignore/d;s|API_DIR|${API_DIR}|g;s|*Client|*${CLIENT_NAME}Client|g" \ +- pkg/client/client_resource_template.go > \ +- ${OUT_DIR}/zz_generated.client_${resource}.go +-done +diff --git a/pkg/client/client_dynamic.go b/pkg/client/client_dynamic.go +deleted file mode 100644 +index 5f3ac2a..0000000 +--- a/pkg/client/client_dynamic.go ++++ /dev/null +@@ -1,97 +0,0 @@ +-// +build ignore +- +-package client +- +-import ( +- "fmt" +- +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/storage" +- "github.com/weaveworks/libgitops/pkg/storage/filterer" +- "k8s.io/apimachinery/pkg/runtime/schema" +-) +- +-// DynamicClient is an interface for accessing API types generically +-type DynamicClient interface { +- // New returns a new Object of its kind +- New() runtime.Object +- // Get returns an Object matching the UID from the storage +- Get(runtime.UID) (runtime.Object, error) +- // Set saves an Object into the persistent storage +- Set(runtime.Object) error +- // Patch performs a strategic merge patch on the object with +- // the given UID, using the byte-encoded patch given +- Patch(runtime.UID, []byte) error +- // Find returns an Object based on the given filter, filters can +- // match e.g. the Object's Name, UID or a specific property +- Find(filter filterer.BaseFilter) (runtime.Object, error) +- // FindAll returns multiple Objects based on the given filter, filters can +- // match e.g. 
the Object's Name, UID or a specific property +- FindAll(filter filterer.BaseFilter) ([]runtime.Object, error) +- // Delete deletes an Object from the storage +- Delete(uid runtime.UID) error +- // List returns a list of all Objects available +- List() ([]runtime.Object, error) +-} +- +-// dynamicClient is a struct implementing the DynamicClient interface +-// It uses a shared storage instance passed from the Client together with its own Filterer +-type dynamicClient struct { +- storage storage.Storage +- gvk schema.GroupVersionKind +- filterer *filterer.Filterer +-} +- +-// NewDynamicClient builds the dynamicClient struct using the storage implementation and a new Filterer +-func NewDynamicClient(s storage.Storage, gvk schema.GroupVersionKind) DynamicClient { +- return &dynamicClient{ +- storage: s, +- gvk: gvk, +- filterer: filterer.NewFilterer(s), +- } +-} +- +-// New returns a new Object of its kind +-func (c *dynamicClient) New() runtime.Object { +- obj, err := c.storage.New(c.gvk) +- if err != nil { +- panic(fmt.Sprintf("Client.New must not return an error: %v", err)) +- } +- return obj +-} +- +-// Get returns an Object based the given UID +-func (c *dynamicClient) Get(uid runtime.UID) (runtime.Object, error) { +- return c.storage.Get(c.gvk, uid) +-} +- +-// Set saves an Object into the persistent storage +-func (c *dynamicClient) Set(resource runtime.Object) error { +- return c.storage.Set(c.gvk, resource) +-} +- +-// Patch performs a strategic merge patch on the object with +-// the given UID, using the byte-encoded patch given +-func (c *dynamicClient) Patch(uid runtime.UID, patch []byte) error { +- return c.storage.Patch(c.gvk, uid, patch) +-} +- +-// Find returns an Object based on a given Filter +-func (c *dynamicClient) Find(filter filterer.BaseFilter) (runtime.Object, error) { +- return c.filterer.Find(c.gvk, filter) +-} +- +-// FindAll returns multiple Objects based on a given Filter +-func (c *dynamicClient) FindAll(filter filterer.BaseFilter) 
([]runtime.Object, error) { +- return c.filterer.FindAll(c.gvk, filter) +-} +- +-// Delete deletes the Object from the storage +-func (c *dynamicClient) Delete(uid runtime.UID) error { +- return c.storage.Delete(c.gvk, uid) +-} +- +-// List returns a list of all Objects available +-func (c *dynamicClient) List() ([]runtime.Object, error) { +- return c.storage.List(c.gvk) +-} +diff --git a/pkg/client/client_resource_template.go b/pkg/client/client_resource_template.go +deleted file mode 100644 +index 53bc874..0000000 +--- a/pkg/client/client_resource_template.go ++++ /dev/null +@@ -1,152 +0,0 @@ +-// +build ignore +- +-/* +- Note: This file is autogenerated! Do not edit it manually! +- Edit client_resource_template.go instead, and run +- hack/generate-client.sh afterwards. +-*/ +- +-package client +- +-import ( +- "fmt" +- +- api "API_DIR" +- +- log "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/storage" +- "github.com/weaveworks/libgitops/pkg/storage/filterer" +- "k8s.io/apimachinery/pkg/runtime/schema" +-) +- +-// ResourceClient is an interface for accessing Resource-specific API objects +-type ResourceClient interface { +- // New returns a new Resource +- New() *api.Resource +- // Get returns the Resource matching given UID from the storage +- Get(runtime.UID) (*api.Resource, error) +- // Set saves the given Resource into persistent storage +- Set(*api.Resource) error +- // Patch performs a strategic merge patch on the object with +- // the given UID, using the byte-encoded patch given +- Patch(runtime.UID, []byte) error +- // Find returns the Resource matching the given filter, filters can +- // match e.g. the Object's Name, UID or a specific property +- Find(filter filterer.BaseFilter) (*api.Resource, error) +- // FindAll returns multiple Resources matching the given filter, filters can +- // match e.g. 
the Object's Name, UID or a specific property +- FindAll(filter filterer.BaseFilter) ([]*api.Resource, error) +- // Delete deletes the Resource with the given UID from the storage +- Delete(uid runtime.UID) error +- // List returns a list of all Resources available +- List() ([]*api.Resource, error) +-} +- +-// Resources returns the ResourceClient for the Client object +-func (c *Client) Resources() ResourceClient { +- if c.resourceClient == nil { +- c.resourceClient = newResourceClient(c.storage, c.gv) +- } +- +- return c.resourceClient +-} +- +-// resourceClient is a struct implementing the ResourceClient interface +-// It uses a shared storage instance passed from the Client together with its own Filterer +-type resourceClient struct { +- storage storage.Storage +- filterer *filterer.Filterer +- gvk schema.GroupVersionKind +-} +- +-// newResourceClient builds the resourceClient struct using the storage implementation and a new Filterer +-func newResourceClient(s storage.Storage, gv schema.GroupVersion) ResourceClient { +- return &resourceClient{ +- storage: s, +- filterer: filterer.NewFilterer(s), +- gvk: gv.WithKind(api.KindResource.Title()), +- } +-} +- +-// New returns a new Object of its kind +-func (c *resourceClient) New() *api.Resource { +- log.Tracef("Client.New; GVK: %v", c.gvk) +- obj, err := c.storage.New(c.gvk) +- if err != nil { +- panic(fmt.Sprintf("Client.New must not return an error: %v", err)) +- } +- return obj.(*api.Resource) +-} +- +-// Find returns a single Resource based on the given Filter +-func (c *resourceClient) Find(filter filterer.BaseFilter) (*api.Resource, error) { +- log.Tracef("Client.Find; GVK: %v", c.gvk) +- object, err := c.filterer.Find(c.gvk, filter) +- if err != nil { +- return nil, err +- } +- +- return object.(*api.Resource), nil +-} +- +-// FindAll returns multiple Resources based on the given Filter +-func (c *resourceClient) FindAll(filter filterer.BaseFilter) ([]*api.Resource, error) { +- log.Tracef("Client.FindAll; 
GVK: %v", c.gvk) +- matches, err := c.filterer.FindAll(c.gvk, filter) +- if err != nil { +- return nil, err +- } +- +- results := make([]*api.Resource, 0, len(matches)) +- for _, item := range matches { +- results = append(results, item.(*api.Resource)) +- } +- +- return results, nil +-} +- +-// Get returns the Resource matching given UID from the storage +-func (c *resourceClient) Get(uid runtime.UID) (*api.Resource, error) { +- log.Tracef("Client.Get; UID: %q, GVK: %v", uid, c.gvk) +- object, err := c.storage.Get(c.gvk, uid) +- if err != nil { +- return nil, err +- } +- +- return object.(*api.Resource), nil +-} +- +-// Set saves the given Resource into the persistent storage +-func (c *resourceClient) Set(resource *api.Resource) error { +- log.Tracef("Client.Set; UID: %q, GVK: %v", resource.GetUID(), c.gvk) +- return c.storage.Set(c.gvk, resource) +-} +- +-// Patch performs a strategic merge patch on the object with +-// the given UID, using the byte-encoded patch given +-func (c *resourceClient) Patch(uid runtime.UID, patch []byte) error { +- return c.storage.Patch(c.gvk, uid, patch) +-} +- +-// Delete deletes the Resource from the storage +-func (c *resourceClient) Delete(uid runtime.UID) error { +- log.Tracef("Client.Delete; UID: %q, GVK: %v", uid, c.gvk) +- return c.storage.Delete(c.gvk, uid) +-} +- +-// List returns a list of all Resources available +-func (c *resourceClient) List() ([]*api.Resource, error) { +- log.Tracef("Client.List; GVK: %v", c.gvk) +- list, err := c.storage.List(c.gvk) +- if err != nil { +- return nil, err +- } +- +- results := make([]*api.Resource, 0, len(list)) +- for _, item := range list { +- results = append(results, item.(*api.Resource)) +- } +- +- return results, nil +-} +diff --git a/pkg/filter/interfaces.go b/pkg/filter/interfaces.go +index 62d3cd3..a097112 100644 +--- a/pkg/filter/interfaces.go ++++ b/pkg/filter/interfaces.go +@@ -1,48 +1,20 @@ + package filter + +-import "github.com/weaveworks/libgitops/pkg/runtime" ++import 
( ++ "errors" + +-// ListFilter is an interface for pipe-like list filtering behavior. +-type ListFilter interface { +- // Filter walks through all objects in obj, assesses whether the object +- // matches the filter parameters, and conditionally adds it to the return +- // slice or not. This method can be thought of like an UNIX pipe. +- Filter(objs ...runtime.Object) ([]runtime.Object, error) +-} ++ "sigs.k8s.io/controller-runtime/pkg/client" ++) ++ ++var ( ++ // ErrInvalidFilterParams describes an error where invalid parameters were given ++ // to a filter. ++ ErrInvalidFilterParams = errors.New("invalid parameters given to filter") ++) + + // ObjectFilter is an interface for filtering objects one-by-one. + type ObjectFilter interface { +- // Filter takes in one object (at once, per invocation), and returns a ++ // Match takes in one object (at once, per invocation), and returns a + // boolean whether the object matches the filter parameters, or not. +- Filter(obj runtime.Object) (bool, error) +-} +- +-// ObjectToListFilter transforms an ObjectFilter into a ListFilter. If of is nil, +-// this function panics. +-func ObjectToListFilter(of ObjectFilter) ListFilter { +- if of == nil { +- panic("programmer error: of ObjectFilter must not be nil in ObjectToListFilter") +- } +- return &objectToListFilter{of} +-} +- +-type objectToListFilter struct { +- of ObjectFilter +-} +- +-// Filter implements ListFilter, but uses an ObjectFilter for the underlying logic. 
+-func (f objectToListFilter) Filter(objs ...runtime.Object) (retarr []runtime.Object, err error) { +- // Walk through all objects +- for _, obj := range objs { +- // Match them one-by-one against the ObjectFilter +- match, err := f.of.Filter(obj) +- if err != nil { +- return nil, err +- } +- // If the object matches, include it in the return array +- if match { +- retarr = append(retarr, obj) +- } +- } +- return ++ Match(obj client.Object) (bool, error) + } +diff --git a/pkg/filter/labels.go b/pkg/filter/labels.go +new file mode 100644 +index 0000000..24ef9f1 +--- /dev/null ++++ b/pkg/filter/labels.go +@@ -0,0 +1,46 @@ ++package filter ++ ++import ( ++ "fmt" ++ ++ "k8s.io/apimachinery/pkg/labels" ++ "sigs.k8s.io/controller-runtime/pkg/client" ++) ++ ++// LabelsFilter implements ObjectFilter and FilterOption. ++// It also implements client.{List,DeleteAllOf}Option so ++// it can be passed into client.Client.{List,DeleteAllOf} ++// as a way to conveniently filter those lists. ++var _ ObjectFilter = LabelsFilter{} ++var _ FilterOption = LabelsFilter{} ++var _ client.ListOption = LabelsFilter{} ++var _ client.DeleteAllOfOption = LabelsFilter{} ++ ++// LabelsFilter is an ObjectFilter that compares metav1.Object.GetLabels() ++// to the LabelSelector field. ++type LabelsFilter struct { ++ // LabelSelector filters results by label. Use SetLabelSelector to ++ // set from raw string form. ++ // +required ++ LabelSelector labels.Selector ++} ++ ++// Match implements ObjectFilter ++func (f LabelsFilter) Match(obj client.Object) (bool, error) { ++ // Require f.Namespace to always be set. 
++ if f.LabelSelector == nil { ++ return false, fmt.Errorf("the LabelsFilter.LabelSelector field must not be nil: %w", ErrInvalidFilterParams) ++ } ++ ++ return f.LabelSelector.Matches(labels.Set(obj.GetLabels())), nil ++} ++ ++// ApplyToList implements client.ListOption, but is just a "dummy" implementation in order to implement ++// the interface, so that this struct can be passed to client.Reader.List() ++func (f LabelsFilter) ApplyToList(_ *client.ListOptions) {} ++func (f LabelsFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {} ++ ++// ApplyToFilterOptions implements FilterOption ++func (f LabelsFilter) ApplyToFilterOptions(target *FilterOptions) { ++ target.ObjectFilters = append(target.ObjectFilters, f) ++} +diff --git a/pkg/filter/name.go b/pkg/filter/name.go +index 42e516c..ade3d99 100644 +--- a/pkg/filter/name.go ++++ b/pkg/filter/name.go +@@ -4,40 +4,36 @@ import ( + "fmt" + "strings" + +- "github.com/weaveworks/libgitops/pkg/runtime" ++ "sigs.k8s.io/controller-runtime/pkg/client" + ) + +-// NameFilter implements ObjectFilter and ListOption. ++// NameFilter implements ObjectFilter and FilterOption. ++// It also implements client.{List,DeleteAllOf}Option so ++// it can be passed into client.Client.{List,DeleteAllOf} ++// as a way to conveniently filter those lists. + var _ ObjectFilter = NameFilter{} +-var _ ListOption = NameFilter{} ++var _ FilterOption = NameFilter{} ++var _ client.ListOption = NameFilter{} ++var _ client.DeleteAllOfOption = NameFilter{} + +-// NameFilter is an ObjectFilter that compares runtime.Object.GetName() ++// NameFilter is an ObjectFilter that compares Object.GetName() + // to the Name field by either equality or prefix. + type NameFilter struct { + // Name matches the object by .metadata.name. + // +required + Name string +- // Namespace matches the object by .metadata.namespace. If left as +- // an empty string, it is ignored when filtering. 
+- // +optional +- Namespace string +- // MatchPrefix whether the name (not namespace) matching should be exact, or prefix-based. ++ // MatchPrefix whether the name matching should be exact, or prefix-based. + // +optional + MatchPrefix bool + } + +-// Filter implements ObjectFilter +-func (f NameFilter) Filter(obj runtime.Object) (bool, error) { ++// Match implements ObjectFilter ++func (f NameFilter) Match(obj client.Object) (bool, error) { + // Require f.Name to always be set. + if len(f.Name) == 0 { + return false, fmt.Errorf("the NameFilter.Name field must not be empty: %w", ErrInvalidFilterParams) + } + +- // If f.Namespace is set, and it does not match the object, return false +- if len(f.Namespace) > 0 && f.Namespace != obj.GetNamespace() { +- return false, nil +- } +- + // If the Name should be matched by the prefix, use strings.HasPrefix + if f.MatchPrefix { + return strings.HasPrefix(obj.GetName(), f.Name), nil +@@ -46,9 +42,12 @@ func (f NameFilter) Filter(obj runtime.Object) (bool, error) { + return f.Name == obj.GetName(), nil + } + +-// ApplyToListOptions implements ListOption, and adds itself converted to +-// a ListFilter to ListOptions.Filters. 
+-func (f NameFilter) ApplyToListOptions(target *ListOptions) error { +- target.Filters = append(target.Filters, ObjectToListFilter(f)) +- return nil ++// ApplyToList implements client.ListOption, but is just a "dummy" implementation in order to implement ++// the interface, so that this struct can be passed to client.Reader.List() ++func (f NameFilter) ApplyToList(_ *client.ListOptions) {} ++func (f NameFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {} ++ ++// ApplyToFilterOptions implements FilterOption ++func (f NameFilter) ApplyToFilterOptions(target *FilterOptions) { ++ target.ObjectFilters = append(target.ObjectFilters, f) + } +diff --git a/pkg/filter/namespace.go b/pkg/filter/namespace.go +new file mode 100644 +index 0000000..ae1c884 +--- /dev/null ++++ b/pkg/filter/namespace.go +@@ -0,0 +1,45 @@ ++package filter ++ ++import ( ++ "fmt" ++ ++ "sigs.k8s.io/controller-runtime/pkg/client" ++) ++ ++// NamespaceFilter implements ObjectFilter and FilterOption. ++// It also implements client.{List,DeleteAllOf}Option so ++// it can be passed into client.Client.{List,DeleteAllOf} ++// as a way to conveniently filter those lists. ++var _ ObjectFilter = NamespaceFilter{} ++var _ FilterOption = NamespaceFilter{} ++var _ client.ListOption = NamespaceFilter{} ++var _ client.DeleteAllOfOption = NamespaceFilter{} ++ ++// NamespaceFilter is an ObjectFilter that compares Object.GetNamespace() ++// to the Namespace field. ++type NamespaceFilter struct { ++ // Namespace matches the object by .metadata.namespace. If left as ++ // an empty string, it is ignored when filtering. ++ // +required ++ Namespace string ++} ++ ++// Match implements ObjectFilter ++func (f NamespaceFilter) Match(obj client.Object) (bool, error) { ++ // Require f.Namespace to always be set. 
++ if len(f.Namespace) == 0 { ++ return false, fmt.Errorf("the NamespaceFilter.Namespace field must not be empty: %w", ErrInvalidFilterParams) ++ } ++ // Otherwise, just use an equality check ++ return f.Namespace == obj.GetNamespace(), nil ++} ++ ++// ApplyToList implements client.ListOption, but is just a "dummy" implementation in order to implement ++// the interface, so that this struct can be passed to client.Reader.List() ++func (f NamespaceFilter) ApplyToList(_ *client.ListOptions) {} ++func (f NamespaceFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {} ++ ++// ApplyToFilterOptions implements FilterOption ++func (f NamespaceFilter) ApplyToFilterOptions(target *FilterOptions) { ++ target.ObjectFilters = append(target.ObjectFilters, f) ++} +diff --git a/pkg/filter/options.go b/pkg/filter/options.go +index 4a831dd..6608da3 100644 +--- a/pkg/filter/options.go ++++ b/pkg/filter/options.go +@@ -1,27 +1,56 @@ + package filter + +-// ListOptions is a generic struct for listing options. +-type ListOptions struct { +- // Filters contains a chain of ListFilters, which will be processed in order and pipe the +- // available objects through before returning. +- Filters []ListFilter ++import "sigs.k8s.io/controller-runtime/pkg/client" ++ ++// FilterOption is an interface for implementations that know how to ++// mutate FilterOptions. ++type FilterOption interface { ++ // ApplyToFilterOptions applies the configuration of the current object into a target FilterOptions struct. ++ ApplyToFilterOptions(target *FilterOptions) + } + +-// ListOption is an interface which can be passed into e.g. List() methods as a variadic-length +-// argument list. +-type ListOption interface { +- // ApplyToListOptions applies the configuration of the current object into a target ListOptions struct. +- ApplyToListOptions(target *ListOptions) error ++// FilterOptions is a set of options for filtering. 
It implements the ObjectFilter interface ++// itself, so it can be used kind of as a multi-ObjectFilter. ++type FilterOptions struct { ++ // ObjectFilters contains a set of filters for a single object. All of the filters must return ++ // true an a nil error for Match(obj) to return (true, nil). ++ ObjectFilters []ObjectFilter + } + +-// MakeListOptions makes a completed ListOptions struct from a list of ListOption implementations. +-func MakeListOptions(opts ...ListOption) (*ListOptions, error) { +- o := &ListOptions{} +- for _, opt := range opts { +- // For every option, apply it into o, and check if there's an error +- if err := opt.ApplyToListOptions(o); err != nil { +- return nil, err ++// Match matches the object against all the ObjectFilters. ++func (o *FilterOptions) Match(obj client.Object) (bool, error) { ++ for _, filter := range o.ObjectFilters { ++ matched, err := filter.Match(obj) ++ if err != nil { ++ return false, err ++ } ++ if !matched { ++ return false, nil + } + } +- return o, nil ++ return true, nil ++} ++ ++// ApplyToFilterOptions implements FilterOption ++func (o *FilterOptions) ApplyToFilterOptions(target *FilterOptions) { ++ target.ObjectFilters = append(target.ObjectFilters, o.ObjectFilters...) ++} ++ ++// ApplyOptions applies the given FilterOptions to itself and returns itself. ++func (o *FilterOptions) ApplyOptions(opts []FilterOption) *FilterOptions { ++ for _, opt := range opts { ++ opt.ApplyToFilterOptions(o) ++ } ++ return o ++} ++ ++// ApplyOption applies one option that aims to implement FilterOption, ++// but at compile-time maybe does not for sure. 
This can be used for ++// lists of other Options that possibly implement FilterOption in the ++// following way: for _, opt := range opts { filterOpts.ApplyOption(opt) } ++func (o *FilterOptions) ApplyOption(opt interface{}) *FilterOptions { ++ if fOpt, ok := opt.(FilterOption); ok { ++ fOpt.ApplyToFilterOptions(o) ++ } ++ return o + } +diff --git a/pkg/filter/uid.go b/pkg/filter/uid.go +index eea48ff..1aedab3 100644 +--- a/pkg/filter/uid.go ++++ b/pkg/filter/uid.go +@@ -1,25 +1,23 @@ + package filter + + import ( +- "errors" + "fmt" + "strings" + +- "github.com/weaveworks/libgitops/pkg/runtime" + "k8s.io/apimachinery/pkg/types" ++ "sigs.k8s.io/controller-runtime/pkg/client" + ) + +-var ( +- // ErrInvalidFilterParams describes an error where invalid parameters were given +- // to a filter. +- ErrInvalidFilterParams = errors.New("invalid parameters given to filter") +-) +- +-// UIDFilter implements ObjectFilter and ListOption. ++// UIDFilter implements ObjectFilter and FilterOption. ++// It also implements client.{List,DeleteAllOf}Option so ++// it can be passed into client.Client.{List,DeleteAllOf} ++// as a way to conveniently filter those lists. + var _ ObjectFilter = UIDFilter{} +-var _ ListOption = UIDFilter{} ++var _ FilterOption = UIDFilter{} ++var _ client.ListOption = UIDFilter{} ++var _ client.DeleteAllOfOption = UIDFilter{} + +-// UIDFilter is an ObjectFilter that compares runtime.Object.GetUID() to ++// UIDFilter is an ObjectFilter that compares Object.GetUID() to + // the UID field by either equality or prefix. The UID field is required, + // otherwise ErrInvalidFilterParams is returned. + type UIDFilter struct { +@@ -31,8 +29,8 @@ type UIDFilter struct { + MatchPrefix bool + } + +-// Filter implements ObjectFilter +-func (f UIDFilter) Filter(obj runtime.Object) (bool, error) { ++// Match implements ObjectFilter ++func (f UIDFilter) Match(obj client.Object) (bool, error) { + // Require f.UID to always be set. 
+ if len(f.UID) == 0 { + return false, fmt.Errorf("the UIDFilter.UID field must not be empty: %w", ErrInvalidFilterParams) +@@ -45,9 +43,12 @@ func (f UIDFilter) Filter(obj runtime.Object) (bool, error) { + return f.UID == obj.GetUID(), nil + } + +-// ApplyToListOptions implements ListOption, and adds itself converted to +-// a ListFilter to ListOptions.Filters. +-func (f UIDFilter) ApplyToListOptions(target *ListOptions) error { +- target.Filters = append(target.Filters, ObjectToListFilter(f)) +- return nil ++// ApplyToList implements client.ListOption, but is just a "dummy" implementation in order to implement ++// the interface, so that this struct can be passed to client.Reader.List() ++func (f UIDFilter) ApplyToList(_ *client.ListOptions) {} ++func (f UIDFilter) ApplyToDeleteAllOf(_ *client.DeleteAllOfOptions) {} ++ ++// ApplyToFilterOptions implements FilterOption ++func (f UIDFilter) ApplyToFilterOptions(target *FilterOptions) { ++ target.ObjectFilters = append(target.ObjectFilters, f) + } +diff --git a/pkg/gitdir/gitdir.go b/pkg/gitdir/gitdir.go +deleted file mode 100644 +index a9eb0b7..0000000 +--- a/pkg/gitdir/gitdir.go ++++ /dev/null +@@ -1,474 +0,0 @@ +-package gitdir +- +-import ( +- "context" +- "errors" +- "fmt" +- "io/ioutil" +- "os" +- "sync" +- "time" +- +- "github.com/fluxcd/go-git-providers/gitprovider" +- git "github.com/go-git/go-git/v5" +- "github.com/go-git/go-git/v5/plumbing" +- "github.com/go-git/go-git/v5/plumbing/object" +- log "github.com/sirupsen/logrus" +- "k8s.io/apimachinery/pkg/util/wait" +-) +- +-var ( +- // ErrNotStarted happens if you try to operate on the gitDirectory before you have started +- // it with StartCheckoutLoop. +- ErrNotStarted = errors.New("the gitDirectory hasn't been started (and hence, cloned) yet") +- // ErrCannotWriteToReadOnly happens if you try to do a write operation for a non-authenticated Git repo. 
+- ErrCannotWriteToReadOnly = errors.New("the gitDirectory is read-only, cannot write") +-) +- +-const ( +- defaultBranch = "master" +- defaultRemote = "origin" +- defaultInterval = 30 * time.Second +- defaultTimeout = 1 * time.Minute +-) +- +-// GitDirectoryOptions provides options for the gitDirectory. +-// TODO: Refactor this into the controller-runtime Options factory pattern. +-type GitDirectoryOptions struct { +- // Options +- Branch string // default "master" +- Interval time.Duration // default 30s +- Timeout time.Duration // default 1m +- // TODO: Support folder prefixes +- +- // Authentication +- AuthMethod AuthMethod +-} +- +-func (o *GitDirectoryOptions) Default() { +- if o.Branch == "" { +- o.Branch = defaultBranch +- } +- if o.Interval == 0 { +- o.Interval = defaultInterval +- } +- if o.Timeout == 0 { +- o.Timeout = defaultTimeout +- } +-} +- +-// GitDirectory is an abstraction layer for a temporary Git clone. It pulls +-// and checks out new changes periodically in the background. It also allows +-// high-level access to write operations, like creating a new branch, committing, +-// and pushing. +-type GitDirectory interface { +- // Dir returns the backing temporary directory of the git clone. +- Dir() string +- // MainBranch returns the configured main branch. +- MainBranch() string +- // RepositoryRef returns the repository reference. +- RepositoryRef() gitprovider.RepositoryRef +- +- // StartCheckoutLoop clones the repo synchronously, and then starts the checkout loop non-blocking. +- // If the checkout loop has been started already, this is a no-op. +- StartCheckoutLoop() error +- // Suspend waits for any pending transactions or operations, and then locks the internal mutex so that +- // no other operations can start. This means the periodic background checkout loop will momentarily stop. +- Suspend() +- // Resume unlocks the mutex locked in Suspend(), so that other Git operations, like the background checkout +- // loop can resume its operation. 
+- Resume() +- +- // Pull performs a pull & checkout to the latest revision. +- // ErrNotStarted is returned if the repo hasn't been cloned yet. +- Pull(ctx context.Context) error +- +- // CheckoutNewBranch creates a new branch and checks out to it. +- // ErrNotStarted is returned if the repo hasn't been cloned yet. +- CheckoutNewBranch(branchName string) error +- // CheckoutMainBranch goes back to the main branch. +- // ErrNotStarted is returned if the repo hasn't been cloned yet. +- CheckoutMainBranch() error +- +- // Commit creates a commit of all changes in the current worktree with the given parameters. +- // It also automatically pushes the branch after the commit. +- // ErrNotStarted is returned if the repo hasn't been cloned yet. +- // ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided. +- Commit(ctx context.Context, authorName, authorEmail, msg string) error +- // CommitChannel is a channel to where new observed Git SHAs are written. +- CommitChannel() chan string +- +- // Cleanup terminates any pending operations, and removes the temporary directory. +- Cleanup() error +-} +- +-// Create a new GitDirectory implementation. In order to start using this, run StartCheckoutLoop(). +-func NewGitDirectory(repoRef gitprovider.RepositoryRef, opts GitDirectoryOptions) (GitDirectory, error) { +- log.Info("Initializing the Git repo...") +- +- // Default the options +- opts.Default() +- +- // Create a temporary directory for the clone +- tmpDir, err := ioutil.TempDir("", "libgitops") +- if err != nil { +- return nil, err +- } +- log.Debugf("Created temporary directory for the git clone at %q", tmpDir) +- +- d := &gitDirectory{ +- repoRef: repoRef, +- GitDirectoryOptions: opts, +- cloneDir: tmpDir, +- // TODO: This needs to be large, otherwise it can start blocking unnecessarily if nobody reads it +- commitChan: make(chan string, 1024), +- lock: &sync.Mutex{}, +- } +- // Set up the parent context for this class. 
d.cancel() is called only at Cleanup() +- d.ctx, d.cancel = context.WithCancel(context.Background()) +- +- log.Trace("URL endpoint parsed and authentication method chosen") +- +- if d.canWrite() { +- log.Infof("Running in read-write mode, will commit back current status to the repo") +- } else { +- log.Infof("Running in read-only mode, won't write status back to the repo") +- } +- +- return d, nil +-} +- +-// gitDirectory is an implementation which keeps a directory +-type gitDirectory struct { +- // user-specified options +- repoRef gitprovider.RepositoryRef +- GitDirectoryOptions +- +- // the temporary directory used for the clone +- cloneDir string +- +- // go-git objects. wt is the worktree of the repo, persistent during the lifetime of repo. +- repo *git.Repository +- wt *git.Worktree +- +- // latest known commit to the system +- lastCommit string +- // events channel from new commits +- commitChan chan string +- +- // the context and its cancel function for the lifetime of this struct (until Cleanup()) +- ctx context.Context +- cancel context.CancelFunc +- // the lock for git operations (so pushing and pulling aren't done simultaneously) +- lock *sync.Mutex +-} +- +-func (d *gitDirectory) Dir() string { +- return d.cloneDir +-} +- +-func (d *gitDirectory) MainBranch() string { +- return d.Branch +-} +- +-func (d *gitDirectory) RepositoryRef() gitprovider.RepositoryRef { +- return d.repoRef +-} +- +-// StartCheckoutLoop clones the repo synchronously, and then starts the checkout loop non-blocking. +-// If the checkout loop has been started already, this is a no-op. 
+-func (d *gitDirectory) StartCheckoutLoop() error { +- if d.wt != nil { +- return nil // already initialized +- } +- // First, clone the repo +- if err := d.clone(); err != nil { +- return err +- } +- go d.checkoutLoop() +- return nil +-} +- +-func (d *gitDirectory) Suspend() { +- d.lock.Lock() +-} +- +-func (d *gitDirectory) Resume() { +- d.lock.Unlock() +-} +- +-func (d *gitDirectory) CommitChannel() chan string { +- return d.commitChan +-} +- +-func (d *gitDirectory) checkoutLoop() { +- log.Info("Starting the checkout loop...") +- +- wait.NonSlidingUntilWithContext(d.ctx, func(_ context.Context) { +- +- log.Trace("checkoutLoop: Will perform pull operation") +- // Perform a pull & checkout of the new revision +- if err := d.Pull(d.ctx); err != nil { +- log.Errorf("checkoutLoop: git pull failed with error: %v", err) +- return +- } +- +- }, d.Interval) +- log.Info("Exiting the checkout loop...") +-} +- +-func (d *gitDirectory) cloneURL() string { +- return d.repoRef.GetCloneURL(d.AuthMethod.TransportType()) +-} +- +-func (d *gitDirectory) canWrite() bool { +- return d.AuthMethod != nil +-} +- +-// verifyRead makes sure it's ok to start a read-something-from-git process +-func (d *gitDirectory) verifyRead() error { +- // Safeguard against not starting yet +- if d.wt == nil { +- return fmt.Errorf("cannot pull: %w", ErrNotStarted) +- } +- return nil +-} +- +-// verifyWrite makes sure it's ok to start a write-something-to-git process +-func (d *gitDirectory) verifyWrite() error { +- // We need all read privileges first +- if err := d.verifyRead(); err != nil { +- return err +- } +- // Make sure we don't write to a possibly read-only repo +- if !d.canWrite() { +- return ErrCannotWriteToReadOnly +- } +- return nil +-} +- +-func (d *gitDirectory) clone() error { +- // Lock the mutex now that we're starting, and unlock it when exiting +- d.lock.Lock() +- defer d.lock.Unlock() +- +- log.Infof("Starting to clone the repository %s with timeout %s", d.repoRef, d.Timeout) +- 
// Do a clone operation to the temporary directory, with a timeout +- err := d.contextWithTimeout(d.ctx, func(ctx context.Context) error { +- var err error +- d.repo, err = git.PlainCloneContext(ctx, d.Dir(), false, &git.CloneOptions{ +- URL: d.cloneURL(), +- Auth: d.AuthMethod, +- RemoteName: defaultRemote, +- ReferenceName: plumbing.NewBranchReferenceName(d.Branch), +- SingleBranch: true, +- NoCheckout: false, +- //Depth: 1, // ref: https://github.com/src-d/go-git/issues/1143 +- RecurseSubmodules: 0, +- Progress: nil, +- Tags: git.NoTags, +- }) +- return err +- }) +- // Handle errors +- switch err { +- case nil: +- // no-op, just continue. +- case context.DeadlineExceeded: +- return fmt.Errorf("git clone operation took longer than deadline %s", d.Timeout) +- case context.Canceled: +- log.Tracef("context was cancelled") +- return nil // if Cleanup() was called, just exit the goroutine +- default: +- return fmt.Errorf("git clone error: %v", err) +- } +- +- // Populate the worktree pointer +- d.wt, err = d.repo.Worktree() +- if err != nil { +- return fmt.Errorf("git get worktree error: %v", err) +- } +- +- // Get the latest HEAD commit and report it to the user +- ref, err := d.repo.Head() +- if err != nil { +- return err +- } +- +- d.observeCommit(ref.Hash()) +- return nil +-} +- +-func (d *gitDirectory) Pull(ctx context.Context) error { +- // Lock the mutex now that we're starting, and unlock it when exiting +- d.lock.Lock() +- defer d.lock.Unlock() +- +- // Make sure it's okay to read +- if err := d.verifyRead(); err != nil { +- return err +- } +- +- // Perform the git pull operation using the timeout +- err := d.contextWithTimeout(ctx, func(innerCtx context.Context) error { +- log.Trace("checkoutLoop: Starting pull operation") +- return d.wt.PullContext(innerCtx, &git.PullOptions{ +- Auth: d.AuthMethod, +- SingleBranch: true, +- }) +- }) +- // Handle errors +- switch err { +- case nil, git.NoErrAlreadyUpToDate: +- // no-op, just continue. 
Allow the git.NoErrAlreadyUpToDate error +- case context.DeadlineExceeded: +- return fmt.Errorf("git pull operation took longer than deadline %s", d.Timeout) +- case context.Canceled: +- log.Tracef("context was cancelled") +- return nil // if Cleanup() was called, just exit the goroutine +- default: +- return fmt.Errorf("failed to pull: %v", err) +- } +- +- log.Trace("checkoutLoop: Pulled successfully") +- +- // get current head +- ref, err := d.repo.Head() +- if err != nil { +- return err +- } +- +- // check if we changed commits +- if d.lastCommit != ref.Hash().String() { +- // Notify upstream that we now have a new commit, and allow writing again +- d.observeCommit(ref.Hash()) +- } +- +- return nil +-} +- +-func (d *gitDirectory) CheckoutNewBranch(branchName string) error { +- // Make sure it's okay to write +- if err := d.verifyWrite(); err != nil { +- return err +- } +- +- return d.wt.Checkout(&git.CheckoutOptions{ +- Branch: plumbing.NewBranchReferenceName(branchName), +- Create: true, +- }) +-} +- +-func (d *gitDirectory) CheckoutMainBranch() error { +- // Make sure it's okay to write +- if err := d.verifyWrite(); err != nil { +- return err +- } +- +- // Best-effort clean +- _ = d.wt.Clean(&git.CleanOptions{ +- Dir: true, +- }) +- // Force-checkout the main branch +- return d.wt.Checkout(&git.CheckoutOptions{ +- Branch: plumbing.NewBranchReferenceName(d.Branch), +- Force: true, +- }) +-} +- +-// observeCommit sets the lastCommit variable so that we know the latest state +-func (d *gitDirectory) observeCommit(commit plumbing.Hash) { +- d.lastCommit = commit.String() +- d.commitChan <- commit.String() +- log.Infof("New commit observed on branch %q: %s", d.Branch, commit) +-} +- +-// Commit creates a commit of all changes in the current worktree with the given parameters. +-// It also automatically pushes the branch after the commit. +-// ErrNotStarted is returned if the repo hasn't been cloned yet. 
+-// ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided. +-func (d *gitDirectory) Commit(ctx context.Context, authorName, authorEmail, msg string) error { +- // Make sure it's okay to write +- if err := d.verifyWrite(); err != nil { +- return err +- } +- +- s, err := d.wt.Status() +- if err != nil { +- return fmt.Errorf("git status failed: %v", err) +- } +- if s.IsClean() { +- log.Debugf("No changed files in git repo, nothing to commit...") +- return nil +- } +- +- // Do a commit and push +- log.Debug("commitLoop: Committing all local changes") +- hash, err := d.wt.Commit(msg, &git.CommitOptions{ +- All: true, +- Author: &object.Signature{ +- Name: authorName, +- Email: authorEmail, +- When: time.Now(), +- }, +- }) +- if err != nil { +- return fmt.Errorf("git commit error: %v", err) +- } +- +- // Perform the git push operation using the timeout +- err = d.contextWithTimeout(ctx, func(innerCtx context.Context) error { +- log.Debug("commitLoop: Will push with timeout") +- return d.repo.PushContext(innerCtx, &git.PushOptions{ +- Auth: d.AuthMethod, +- }) +- }) +- // Handle errors +- switch err { +- case nil, git.NoErrAlreadyUpToDate: +- // no-op, just continue. Allow the git.NoErrAlreadyUpToDate error +- case context.DeadlineExceeded: +- return fmt.Errorf("git push operation took longer than deadline %s", d.Timeout) +- case context.Canceled: +- log.Tracef("context was cancelled") +- return nil // if Cleanup() was called, just exit the goroutine +- default: +- return fmt.Errorf("failed to push: %v", err) +- } +- +- // Notify upstream that we now have a new commit, and allow writing again +- log.Infof("A new commit with the actual state has been created and pushed to the origin: %q", hash) +- d.observeCommit(hash) +- return nil +-} +- +-func (d *gitDirectory) contextWithTimeout(ctx context.Context, fn func(context.Context) error) error { +- // Create a new context with a timeout. 
The push operation either succeeds in time, times out, +- // or is cancelled by Cleanup(). In case of a successful run, the context is always cancelled afterwards. +- ctx, cancel := context.WithTimeout(ctx, d.Timeout) +- defer cancel() +- +- // Run the function using the context and cancel directly afterwards +- fnErr := fn(ctx) +- +- // Return the context error, if any, first so deadline/cancel signals can propagate. +- // Otherwise passthrough the error returned from the function. +- if ctx.Err() != nil { +- log.Debugf("operation context yielded error %v to be returned. Function error was: %v", ctx.Err(), fnErr) +- return ctx.Err() +- } +- return fnErr +-} +- +-// Cleanup cancels running goroutines and operations, and removes the temporary clone directory +-func (d *gitDirectory) Cleanup() error { +- // Cancel the context for the two running goroutines, and any possible long-running operations +- d.cancel() +- +- // Remove the temporary directory +- if err := os.RemoveAll(d.Dir()); err != nil { +- log.Errorf("Failed to clean up temp git directory: %v", err) +- return err +- } +- return nil +-} +diff --git a/pkg/runtime/doc.go b/pkg/runtime/doc.go +deleted file mode 100644 +index 4eb2a1e..0000000 +--- a/pkg/runtime/doc.go ++++ /dev/null +@@ -1,2 +0,0 @@ +-// +k8s:deepcopy-gen=package +-package runtime +diff --git a/pkg/runtime/identifiers.go b/pkg/runtime/identifiers.go +deleted file mode 100644 +index 87bc00e..0000000 +--- a/pkg/runtime/identifiers.go ++++ /dev/null +@@ -1,63 +0,0 @@ +-package runtime +- +-import ( +- "fmt" +- +- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +-) +- +-// DefaultNamespace describes the default namespace name used for the system. +-const DefaultNamespace = "default" +- +-// Identifyable is an object which can be identified +-type Identifyable interface { +- // GetIdentifier can return e.g. 
a "namespace/name" combination, which is not guaranteed +- // to be unique world-wide, or alternatively a random SHA for instance +- GetIdentifier() string +-} +- +-type identifier string +- +-func (i identifier) GetIdentifier() string { return string(i) } +- +-type Metav1NameIdentifierFactory struct{} +- +-func (id Metav1NameIdentifierFactory) Identify(o interface{}) (Identifyable, bool) { +- switch obj := o.(type) { +- case metav1.Object: +- if len(obj.GetNamespace()) == 0 || len(obj.GetName()) == 0 { +- return nil, false +- } +- return NewIdentifier(fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName())), true +- } +- return nil, false +-} +- +-type ObjectUIDIdentifierFactory struct{} +- +-func (id ObjectUIDIdentifierFactory) Identify(o interface{}) (Identifyable, bool) { +- switch obj := o.(type) { +- case Object: +- if len(obj.GetUID()) == 0 { +- return nil, false +- } +- // TODO: Make sure that runtime.APIType works with this +- return NewIdentifier(string(obj.GetUID())), true +- } +- return nil, false +-} +- +-var ( +- // Metav1Identifier identifies an object using its metav1.ObjectMeta Name and Namespace +- Metav1NameIdentifier IdentifierFactory = Metav1NameIdentifierFactory{} +- // ObjectUIDIdentifier identifies an object using its libgitops/pkg/runtime.ObjectMeta UID field +- ObjectUIDIdentifier IdentifierFactory = ObjectUIDIdentifierFactory{} +-) +- +-func NewIdentifier(str string) Identifyable { +- return identifier(str) +-} +- +-type IdentifierFactory interface { +- Identify(o interface{}) (id Identifyable, ok bool) +-} +diff --git a/pkg/runtime/meta.go b/pkg/runtime/meta.go +deleted file mode 100644 +index 32930e1..0000000 +--- a/pkg/runtime/meta.go ++++ /dev/null +@@ -1,52 +0,0 @@ +-package runtime +- +-import ( +- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +- "k8s.io/apimachinery/pkg/runtime" +- "sigs.k8s.io/yaml" +-) +- +-// PartialObjectImpl is a struct implementing PartialObject, used for +-// unmarshalling unknown objects into this 
intermediate type +-// where .Name, .UID, .Kind and .APIVersion become easily available +-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +-type PartialObjectImpl struct { +- metav1.TypeMeta `json:",inline"` +- metav1.ObjectMeta `json:"metadata"` +-} +- +-func (po *PartialObjectImpl) IsPartialObject() {} +- +-// This constructor ensures the PartialObjectImpl fields are not nil. +-// TODO: Make this multi-document-aware? +-func NewPartialObject(frame []byte) (PartialObject, error) { +- obj := &PartialObjectImpl{} +- +- // The yaml package supports both YAML and JSON. Don't use the serializer, as the APIType +- // wrapper is not registered in any scheme. +- if err := yaml.Unmarshal(frame, obj); err != nil { +- return nil, err +- } +- +- return obj, nil +-} +- +-var _ Object = &PartialObjectImpl{} +-var _ PartialObject = &PartialObjectImpl{} +- +-// Object is an union of the Object interfaces that are accessible for a +-// type that embeds both metav1.TypeMeta and metav1.ObjectMeta. +-type Object interface { +- runtime.Object +- metav1.ObjectMetaAccessor +- metav1.Object +-} +- +-// PartialObject is a partially-decoded object, where only metadata has been loaded. +-type PartialObject interface { +- Object +- +- // IsPartialObject is a dummy function for signalling that this is a partially-loaded object +- // i.e. only TypeMeta and ObjectMeta are stored in memory. +- IsPartialObject() +-} +diff --git a/pkg/runtime/zz_generated.deepcopy.go b/pkg/runtime/zz_generated.deepcopy.go +deleted file mode 100644 +index 20beb72..0000000 +--- a/pkg/runtime/zz_generated.deepcopy.go ++++ /dev/null +@@ -1,67 +0,0 @@ +-// +build !ignore_autogenerated +- +-// Code generated by deepcopy-gen. DO NOT EDIT. +- +-package runtime +- +-import ( +- pkgruntime "k8s.io/apimachinery/pkg/runtime" +-) +- +-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+-func (in *Metav1NameIdentifierFactory) DeepCopyInto(out *Metav1NameIdentifierFactory) { +- *out = *in +- return +-} +- +-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metav1NameIdentifierFactory. +-func (in *Metav1NameIdentifierFactory) DeepCopy() *Metav1NameIdentifierFactory { +- if in == nil { +- return nil +- } +- out := new(Metav1NameIdentifierFactory) +- in.DeepCopyInto(out) +- return out +-} +- +-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +-func (in *ObjectUIDIdentifierFactory) DeepCopyInto(out *ObjectUIDIdentifierFactory) { +- *out = *in +- return +-} +- +-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectUIDIdentifierFactory. +-func (in *ObjectUIDIdentifierFactory) DeepCopy() *ObjectUIDIdentifierFactory { +- if in == nil { +- return nil +- } +- out := new(ObjectUIDIdentifierFactory) +- in.DeepCopyInto(out) +- return out +-} +- +-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +-func (in *PartialObjectImpl) DeepCopyInto(out *PartialObjectImpl) { +- *out = *in +- out.TypeMeta = in.TypeMeta +- in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) +- return +-} +- +-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectImpl. +-func (in *PartialObjectImpl) DeepCopy() *PartialObjectImpl { +- if in == nil { +- return nil +- } +- out := new(PartialObjectImpl) +- in.DeepCopyInto(out) +- return out +-} +- +-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new pkgruntime.Object. 
+-func (in *PartialObjectImpl) DeepCopyObject() pkgruntime.Object { +- if c := in.DeepCopy(); c != nil { +- return c +- } +- return nil +-} +diff --git a/pkg/serializer/comments.go b/pkg/serializer/comments.go +index 302c4db..a016939 100644 +--- a/pkg/serializer/comments.go ++++ b/pkg/serializer/comments.go +@@ -27,7 +27,7 @@ var ( + func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct ContentType) { + // If the user opted into preserving comments and the format is YAML, proceed + // If they didn't, return directly +- if !(*d.opts.PreserveComments && ct == ContentTypeYAML) { ++ if !(d.opts.PreserveComments == PreserveCommentsStrict && ct == ContentTypeYAML) { + return + } + +@@ -41,7 +41,7 @@ func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct Conte + // tryToPreserveComments tries to locate the possibly-saved original file data in the object's annotation + func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw FrameWriter, obj runtime.Object, metaObj metav1.Object) error { + // If the user did not opt into preserving comments, just sanitize ObjectMeta temporarily and and return +- if !*e.opts.PreserveComments { ++ if e.opts.PreserveComments == PreserveCommentsDisable { + // Normal encoding without the annotation (so it doesn't leak by accident) + return noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, fw, obj)) + } +diff --git a/pkg/serializer/comments_test.go b/pkg/serializer/comments_test.go +index 8f4c65c..6332e5c 100644 +--- a/pkg/serializer/comments_test.go ++++ b/pkg/serializer/comments_test.go +@@ -18,8 +18,8 @@ kind: Test + spec: + # Head comment + data: +- - field # Inline comment +- - another ++ - field # Inline comment ++ - another + thing: + # Head comment + var: true +@@ -29,9 +29,9 @@ const sampleData2 = `kind: Test + spec: + # Head comment + data: +- - field # Inline comment +- - another: +- subthing: "yes" ++ - field # Inline comment ++ - another: ++ subthing: "yes" 
+ thing: + # Head comment + var: true +diff --git a/pkg/serializer/convertor.go b/pkg/serializer/convertor.go +index bdea096..3fbc814 100644 +--- a/pkg/serializer/convertor.go ++++ b/pkg/serializer/convertor.go +@@ -169,7 +169,8 @@ func (c *objectConvertor) ConvertToVersion(in runtime.Object, groupVersioner run + // as before, using the scheme's ConvertToVersion function. But if we don't want to convert the newly-decoded + // external object, we can just do nothing and the object will stay unconverted. + // doConversion is always true in the Encode codepath. +- if !c.doConversion { ++ // Also, never convert unknown, partial metadata or unstructured objects (defined as "non-convertible"). ++ if !c.doConversion || IsNonConvertible(in) { + // DeepCopy the object to make sure that although in would be somehow modified, it doesn't affect out + return in.DeepCopyObject(), nil + } +diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go +index 4feff21..7aee5af 100644 +--- a/pkg/serializer/decode.go ++++ b/pkg/serializer/decode.go +@@ -5,119 +5,38 @@ import ( + "io" + "reflect" + +- "github.com/weaveworks/libgitops/pkg/util" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" +- "sigs.k8s.io/yaml" ++ serializeryaml "k8s.io/apimachinery/pkg/runtime/serializer/yaml" + ) + + // This is the groupversionkind for the v1.List object + var listGVK = metav1.Unversioned.WithKind("List") + +-type DecodingOptions struct { +- // Not applicable for Decoder.DecodeInto(). If true, the decoded external object +- // will be converted into its hub (or internal, where applicable) representation. Otherwise, the decoded +- // object will be left in its external representation. 
(Default: false) +- ConvertToHub *bool +- +- // Parse the YAML/JSON in strict mode, returning a specific error if the input +- // contains duplicate or unknown fields or formatting errors. (Default: true) +- Strict *bool +- +- // Automatically default the decoded object. (Default: false) +- Default *bool +- +- // Only applicable for Decoder.DecodeAll(). If the underlying data contains a v1.List, +- // the items of the list will be traversed, decoded into their respective types, and +- // appended to the returned slice. The v1.List will in this case not be returned. +- // This conversion does NOT support preserving comments. If the given scheme doesn't +- // recognize the v1.List, before using it will be registered automatically. (Default: true) +- DecodeListElements *bool +- +- // Whether to preserve YAML comments internally. This only works for objects embedding metav1.ObjectMeta. +- // Only applicable to ContentTypeYAML framers. +- // Using any other framer will be silently ignored. Usage of this option also requires setting +- // the PreserveComments in EncodingOptions, too. (Default: false) +- PreserveComments *bool +- +- // DecodeUnknown specifies whether decode objects with an unknown GroupVersionKind into a +- // *runtime.Unknown object when running Decode(All) (true value) or to return an error when +- // any unrecognized type is found (false value). 
(Default: false) +- DecodeUnknown *bool +-} +- +-type DecodingOptionsFunc func(*DecodingOptions) +- +-func WithConvertToHubDecode(convert bool) DecodingOptionsFunc { +- return func(opts *DecodingOptions) { +- opts.ConvertToHub = &convert +- } +-} +- +-func WithStrictDecode(strict bool) DecodingOptionsFunc { +- return func(opts *DecodingOptions) { +- opts.Strict = &strict +- } +-} +- +-func WithDefaultsDecode(defaults bool) DecodingOptionsFunc { +- return func(opts *DecodingOptions) { +- opts.Default = &defaults +- } +-} +- +-func WithListElementsDecoding(listElements bool) DecodingOptionsFunc { +- return func(opts *DecodingOptions) { +- opts.DecodeListElements = &listElements +- } +-} ++// TODO: To think about: should we take in the DecodeOptions at Decode time instead ++// as a variadic-sized Option slice? It would probably take caching the *json.Serializer ++// and runtime.Decoder for the given options they use, though. + +-func WithCommentsDecode(comments bool) DecodingOptionsFunc { +- return func(opts *DecodingOptions) { +- opts.PreserveComments = &comments +- } +-} +- +-func WithUnknownDecode(unknown bool) DecodingOptionsFunc { +- return func(opts *DecodingOptions) { +- opts.DecodeUnknown = &unknown +- } +-} +- +-func WithDecodingOptions(newOpts DecodingOptions) DecodingOptionsFunc { +- return func(opts *DecodingOptions) { +- // TODO: Null-check all of these before using them +- *opts = newOpts +- } +-} ++func newDecoder(schemeAndCodec *schemeAndCodec, opts DecodeOptions) Decoder { ++ // Allow both YAML and JSON inputs (JSON is a subset of YAML), and deserialize in strict mode ++ s := json.NewSerializerWithOptions(json.DefaultMetaFactory, schemeAndCodec.scheme, schemeAndCodec.scheme, json.SerializerOptions{ ++ Yaml: true, ++ Strict: *opts.Strict, ++ }) + +-func defaultDecodeOpts() *DecodingOptions { +- return &DecodingOptions{ +- ConvertToHub: util.BoolPtr(false), +- Strict: util.BoolPtr(true), +- Default: util.BoolPtr(false), +- DecodeListElements: 
util.BoolPtr(true), +- PreserveComments: util.BoolPtr(false), +- DecodeUnknown: util.BoolPtr(false), +- } +-} ++ decodeCodec := decoderForVersion(schemeAndCodec.scheme, s, *opts.Default, *opts.ConvertToHub) + +-func newDecodeOpts(fns ...DecodingOptionsFunc) *DecodingOptions { +- opts := defaultDecodeOpts() +- for _, fn := range fns { +- fn(opts) +- } +- return opts ++ return &decoder{schemeAndCodec, decodeCodec, opts} + } + + type decoder struct { + *schemeAndCodec + + decoder runtime.Decoder +- opts DecodingOptions ++ opts DecodeOptions + } + + // Decode returns the decoded object from the next document in the FrameReader stream. +@@ -149,8 +68,14 @@ func (d *decoder) Decode(fr FrameReader) (runtime.Object, error) { + func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runtime.Object, error) { + // If the scheme doesn't recognize a v1.List, and we enabled opts.DecodeListElements, + // make the scheme able to decode the v1.List automatically +- if *d.opts.DecodeListElements && !d.scheme.Recognizes(listGVK) { +- d.scheme.AddKnownTypes(metav1.Unversioned, &metav1.List{}) ++ if *d.opts.DecodeListElements { ++ // As .AddKnownTypes is writing to the scheme, make sure we guard the check and the write with a ++ // mutex. ++ d.schemeMu.Lock() ++ if !d.scheme.Recognizes(listGVK) { ++ d.scheme.AddKnownTypes(metav1.Unversioned, &metav1.List{}) ++ } ++ d.schemeMu.Unlock() + } + + // Record if this decode call should have runtime.DecodeInto-functionality +@@ -268,7 +193,7 @@ func (d *decoder) decodeUnknown(doc []byte, ct ContentType) (runtime.Object, err + + func (d *decoder) handleDecodeError(doc []byte, origErr error) error { + // Parse the document's TypeMeta information +- gvk, err := extractYAMLTypeMeta(doc) ++ gvk, err := serializeryaml.DefaultMetaFactory.Interpret(doc) + if err != nil { + return fmt.Errorf("failed to interpret TypeMeta from the given the YAML: %v. 
Decode error was: %w", err, origErr) + } +@@ -320,18 +245,6 @@ func (d *decoder) extractNestedObjects(obj runtime.Object, ct ContentType) ([]ru + return objs, nil + } + +-func newDecoder(schemeAndCodec *schemeAndCodec, opts DecodingOptions) Decoder { +- // Allow both YAML and JSON inputs (JSON is a subset of YAML), and deserialize in strict mode +- s := json.NewSerializerWithOptions(json.DefaultMetaFactory, schemeAndCodec.scheme, schemeAndCodec.scheme, json.SerializerOptions{ +- Yaml: true, +- Strict: *opts.Strict, +- }) +- +- decodeCodec := decoderForVersion(schemeAndCodec.scheme, s, *opts.Default, *opts.ConvertToHub) +- +- return &decoder{schemeAndCodec, decodeCodec, opts} +-} +- + // decoderForVersion is used instead of CodecFactory.DecoderForVersion, as we want to use our own converter + func decoderForVersion(scheme *runtime.Scheme, decoder *json.Serializer, doDefaulting, doConversion bool) runtime.Decoder { + return newConversionCodecForScheme( +@@ -361,20 +274,38 @@ func newConversionCodecForScheme( + defaulter = scheme + } + convertor := newObjectConvertor(scheme, performConversion) +- return versioning.NewCodec(encoder, decoder, convertor, scheme, scheme, defaulter, encodeVersion, decodeVersion, scheme.Name()) ++ // a typer that recognizes metav1.PartialObjectMetadata{,List} ++ typer := &customTyper{scheme} ++ return versioning.NewCodec(encoder, decoder, convertor, scheme, typer, defaulter, encodeVersion, decodeVersion, scheme.Name()) + } + +-// TODO: Use https://github.com/kubernetes/apimachinery/blob/master/pkg/runtime/serializer/yaml/meta.go +-// when we can assume everyone is vendoring k8s v1.19 +-func extractYAMLTypeMeta(data []byte) (*schema.GroupVersionKind, error) { +- typeMeta := runtime.TypeMeta{} +- if err := yaml.Unmarshal(data, &typeMeta); err != nil { +- return nil, fmt.Errorf("could not interpret GroupVersionKind: %w", err) +- } +- gv, err := schema.ParseGroupVersion(typeMeta.APIVersion) +- if err != nil { +- return nil, err ++var _ 
runtime.ObjectTyper = &customTyper{} ++ ++type customTyper struct { ++ scheme *runtime.Scheme ++} ++ ++// ObjectKinds is an extension to the native Scheme.ObjectKinds function, that also ++// recognizes partial matadata objects and lists. The logic here follows closely the ++// scheme's own logic. ++func (t *customTyper) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { ++ // partial objects are always fine to encode/decode as-is when GVK is set. ++ // this similar code exists in runtime.Scheme.ObjectKinds for reference. ++ if IsPartialObject(obj) || IsPartialObjectList(obj) { ++ // we require that the GVK be populated in order to recognize the object ++ gvk := obj.GetObjectKind().GroupVersionKind() ++ if len(gvk.Kind) == 0 { ++ return nil, false, runtime.NewMissingKindErr("unstructured object has no kind") ++ } ++ if len(gvk.Version) == 0 { ++ return nil, false, runtime.NewMissingVersionErr("unstructured object has no version") ++ } ++ return []schema.GroupVersionKind{gvk}, false, nil + } +- gvk := gv.WithKind(typeMeta.Kind) +- return &gvk, nil ++ return t.scheme.ObjectKinds(obj) ++} ++ ++// Recognizes just calls the underlying Scheme.Recognizes ++func (t *customTyper) Recognizes(gvk schema.GroupVersionKind) bool { ++ return t.scheme.Recognizes(gvk) + } +diff --git a/pkg/serializer/encode.go b/pkg/serializer/encode.go +index 7706193..a06bd8c 100644 +--- a/pkg/serializer/encode.go ++++ b/pkg/serializer/encode.go +@@ -1,73 +1,25 @@ + package serializer + + import ( +- "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/util" ++ "bytes" ++ "encoding/json" ++ "strings" ++ + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + ) + +-type EncodingOptions struct { +- // Use pretty printing when writing to the output. (Default: true) +- // TODO: Fix that sometimes omitempty fields aren't respected +- Pretty *bool +- // Whether to preserve YAML comments internally. 
This only works for objects embedding metav1.ObjectMeta. +- // Only applicable to ContentTypeYAML framers. +- // Using any other framer will be silently ignored. Usage of this option also requires setting +- // the PreserveComments in DecodingOptions, too. (Default: false) +- // TODO: Make this a BestEffort & Strict mode +- PreserveComments *bool +- +- // TODO: Maybe consider an option to always convert to the preferred version (not just internal) +-} +- +-type EncodingOptionsFunc func(*EncodingOptions) +- +-func WithPrettyEncode(pretty bool) EncodingOptionsFunc { +- return func(opts *EncodingOptions) { +- opts.Pretty = &pretty +- } +-} +- +-func WithCommentsEncode(comments bool) EncodingOptionsFunc { +- return func(opts *EncodingOptions) { +- opts.PreserveComments = &comments +- } +-} +- +-func WithEncodingOptions(newOpts EncodingOptions) EncodingOptionsFunc { +- return func(opts *EncodingOptions) { +- // TODO: Null-check all of these before using them +- *opts = newOpts +- } +-} +- +-func defaultEncodeOpts() *EncodingOptions { +- return &EncodingOptions{ +- Pretty: util.BoolPtr(true), +- PreserveComments: util.BoolPtr(false), +- } +-} +- +-func newEncodeOpts(fns ...EncodingOptionsFunc) *EncodingOptions { +- opts := defaultEncodeOpts() +- for _, fn := range fns { +- fn(opts) ++func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodeOptions) Encoder { ++ return &encoder{ ++ schemeAndCodec, ++ opts, + } +- return opts + } + + type encoder struct { + *schemeAndCodec + +- opts EncodingOptions +-} +- +-func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodingOptions) Encoder { +- return &encoder{ +- schemeAndCodec, +- opts, +- } ++ opts EncodeOptions + } + + // Encode encodes the given objects and writes them to the specified FrameWriter. +@@ -75,6 +27,7 @@ func newEncoder(schemeAndCodec *schemeAndCodec, opts EncodingOptions) Encoder { + // internal object given to the preferred external groupversion. 
No conversion will happen + // if the given object is of an external version. + // TODO: This should automatically convert to the preferred version ++// TODO: Fix that sometimes omitempty fields aren't respected + func (e *encoder) Encode(fw FrameWriter, objs ...runtime.Object) error { + for _, obj := range objs { + // Get the kind for the given object +@@ -110,23 +63,23 @@ func (e *encoder) EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv s + return ErrUnsupportedContentType + } + +- // Choose the pretty or non-pretty one ++ // Choose the default, non-pretty serializer, as we prettify if needed later ++ // We technically could use the JSON PrettySerializer here, but it does not catch the ++ // cases where the JSON iterator invokes MarshalJSON() on an object, and that object ++ // returns non-pretty bytes (e.g. *unstructured.Unstructured). Hence, it is more robust ++ // and extensible to always use the non-pretty serializer, and only on request indent ++ // a given number of spaces after JSON encoding. + encoder := serializerInfo.Serializer + +- // Use the pretty serializer if it was asked for and is defined for the content type +- if *e.opts.Pretty { +- // Apparently not all SerializerInfos have this field defined (e.g. YAML) +- // TODO: This could be considered a bug in upstream, create an issue +- if serializerInfo.PrettySerializer != nil { +- encoder = serializerInfo.PrettySerializer +- } else { +- logrus.Debugf("PrettySerializer for ContentType %s is nil, falling back to Serializer.", fw.ContentType()) +- } +- } +- + // Get a version-specific encoder for the specified groupversion + versionEncoder := encoderForVersion(e.scheme, encoder, gv) + ++ // Check if the user requested prettified JSON output. ++ // If the ContentType is JSON this is ok, we will intent the encode output on the fly. 
++ if *e.opts.JSONIndent > 0 && fw.ContentType() == ContentTypeJSON { ++ fw = &jsonPrettyFrameWriter{indent: *e.opts.JSONIndent, fw: fw} ++ } ++ + // Cast the object to a metav1.Object to get access to annotations + metaobj, ok := toMetaObject(obj) + // For objects without ObjectMeta, the cast will fail. Allow that failure and do "normal" encoding +@@ -150,3 +103,24 @@ func encoderForVersion(scheme *runtime.Scheme, encoder runtime.Encoder, gv schem + true, // convert if needed before encode + ) + } ++ ++type jsonPrettyFrameWriter struct { ++ indent int ++ fw FrameWriter ++} ++ ++func (w *jsonPrettyFrameWriter) Write(p []byte) (n int, err error) { ++ // Indent the source bytes ++ var indented bytes.Buffer ++ err = json.Indent(&indented, p, "", strings.Repeat(" ", w.indent)) ++ if err != nil { ++ return ++ } ++ // Write the pretty bytes to the underlying writer ++ n, err = w.fw.Write(indented.Bytes()) ++ return ++} ++ ++func (w *jsonPrettyFrameWriter) ContentType() ContentType { ++ return w.fw.ContentType() ++} +diff --git a/pkg/serializer/frame_reader.go b/pkg/serializer/frame_reader.go +index 26ead8d..a2ba308 100644 +--- a/pkg/serializer/frame_reader.go ++++ b/pkg/serializer/frame_reader.go +@@ -6,6 +6,7 @@ import ( + "io" + "io/ioutil" + "os" ++ "sync" + + "k8s.io/apimachinery/pkg/runtime/serializer/json" + ) +@@ -71,6 +72,7 @@ func NewJSONFrameReader(rc ReadCloser) FrameReader { + func newFrameReader(rc io.ReadCloser, contentType ContentType) *frameReader { + return &frameReader{ + rc: rc, ++ rcMu: &sync.Mutex{}, + bufSize: defaultBufSize, + maxFrameSize: defaultMaxFrameSize, + contentType: contentType, +@@ -79,12 +81,13 @@ func newFrameReader(rc io.ReadCloser, contentType ContentType) *frameReader { + + // frameReader is a FrameReader implementation + type frameReader struct { +- rc io.ReadCloser ++ // the underlying readcloser and the mutex that guards it ++ rc io.ReadCloser ++ rcMu *sync.Mutex ++ + bufSize int + maxFrameSize int + contentType ContentType +- +- 
// TODO: Maybe add mutexes for thread-safety (so no two goroutines read at the same time) + } + + // ReadFrame reads one frame from the underlying io.Reader. ReadFrame +@@ -93,6 +96,10 @@ type frameReader struct { + // ReadFrame keeps on reading using new calls. ReadFrame might return both data and + // io.EOF. io.EOF will be returned in the final call. + func (rf *frameReader) ReadFrame() (frame []byte, err error) { ++ // Only one actor can read at a time ++ rf.rcMu.Lock() ++ defer rf.rcMu.Unlock() ++ + // Temporary buffer to parts of a frame into + var buf []byte + // How many bytes were read by the read call +@@ -149,6 +156,10 @@ func (rf *frameReader) ContentType() ContentType { + + // Close implements io.Closer and closes the underlying ReadCloser + func (rf *frameReader) Close() error { ++ // Only one actor can access rf.rc at a time ++ rf.rcMu.Lock() ++ defer rf.rcMu.Unlock() ++ + return rf.rc.Close() + } + +@@ -166,3 +177,42 @@ func FromFile(filePath string) ReadCloser { + func FromBytes(content []byte) ReadCloser { + return ioutil.NopCloser(bytes.NewReader(content)) + } ++ ++// NewSingleFrameReader returns a FrameReader for only a single frame of ++// the specified content type. This avoids overhead if it is known that the ++// byte array only contains one frame. The given frame is returned in ++// whole in the first ReadFrame() call, and io.EOF is returned in all future ++// invocations. This FrameReader works for any ContentType and transparently ++// exposes what was given through the ContentType() method. 
++func NewSingleFrameReader(b []byte, ct ContentType) FrameReader { ++ return &singleFrameReader{ ++ ct: ct, ++ b: b, ++ hasBeenRead: false, ++ hasBeenReadMu: &sync.Mutex{}, ++ } ++} ++ ++var _ FrameReader = &singleFrameReader{} ++ ++type singleFrameReader struct { ++ ct ContentType ++ b []byte ++ hasBeenRead bool ++ hasBeenReadMu *sync.Mutex ++} ++ ++func (r *singleFrameReader) ReadFrame() ([]byte, error) { ++ r.hasBeenReadMu.Lock() ++ defer r.hasBeenReadMu.Unlock() ++ // If ReadFrame() has been called once, just return io.EOF. ++ if r.hasBeenRead { ++ return nil, io.EOF ++ } ++ // The first time, mark that we've read, and return the single frame ++ r.hasBeenRead = true ++ return r.b, nil ++} ++ ++func (r *singleFrameReader) ContentType() ContentType { return r.ct } ++func (r *singleFrameReader) Close() error { return nil } +diff --git a/pkg/serializer/frame_reader_test.go b/pkg/serializer/frame_reader_test.go +index a696ed7..063ed8a 100644 +--- a/pkg/serializer/frame_reader_test.go ++++ b/pkg/serializer/frame_reader_test.go +@@ -5,6 +5,7 @@ import ( + "io/ioutil" + "reflect" + "strings" ++ "sync" + "testing" + + "k8s.io/apimachinery/pkg/runtime/serializer/json" +@@ -92,6 +93,7 @@ func Test_FrameReader_ReadFrame(t *testing.T) { + t.Run(tt.name, func(t *testing.T) { + rf := &frameReader{ + rc: tt.fields.rc, ++ rcMu: &sync.Mutex{}, + bufSize: tt.fields.bufSize, + maxFrameSize: tt.fields.maxFrameSize, + } +diff --git a/pkg/serializer/options.go b/pkg/serializer/options.go +new file mode 100644 +index 0000000..e5736e6 +--- /dev/null ++++ b/pkg/serializer/options.go +@@ -0,0 +1,258 @@ ++package serializer ++ ++import ( ++ "k8s.io/utils/pointer" ++) ++ ++// TODO: Import k8s.io/utils/pointer instead of baking our own ptrutils package. 
++ ++type EncodeOption interface { ++ ApplyToEncode(*EncodeOptions) ++} ++ ++func defaultEncodeOpts() *EncodeOptions { ++ return &EncodeOptions{ ++ // Default to "pretty encoding" ++ JSONIndent: pointer.Int32Ptr(2), ++ PreserveComments: PreserveCommentsDisable, ++ } ++} ++ ++type EncodeOptions struct { ++ // Indent JSON encoding output with this many spaces. ++ // Set this to 0, use PrettyEncode(false) or JSONIndent(0) to disable pretty output. ++ // Only applicable to ContentTypeJSON framers. ++ // ++ // Default: 2, i.e. pretty output ++ // TODO: Make this a property of the FrameWriter instead? ++ JSONIndent *int32 ++ ++ // Whether to preserve YAML comments internally. ++ // This only works for objects embedding metav1.ObjectMeta. ++ // ++ // Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored. ++ // ++ // Usage of this option also requires setting the PreserveComments in DecodeOptions, too. ++ // ++ // Default: PreserveCommentsDisable ++ PreserveComments PreserveComments ++ ++ // TODO: Maybe consider an option to always convert to the preferred version (not just internal) ++} ++ ++var _ EncodeOption = &EncodeOptions{} ++ ++func (o *EncodeOptions) ApplyToEncode(target *EncodeOptions) { ++ if o.JSONIndent != nil { ++ target.JSONIndent = o.JSONIndent ++ } ++ if o.PreserveComments != 0 { ++ target.PreserveComments = o.PreserveComments ++ } ++} ++ ++func (o *EncodeOptions) ApplyOptions(opts []EncodeOption) *EncodeOptions { ++ for _, opt := range opts { ++ opt.ApplyToEncode(o) ++ } ++ // it is guaranteed that all options are non-nil, as defaultEncodeOpts() includes all fields ++ return o ++} ++ ++// Whether to preserve YAML comments internally. ++// This only works for objects embedding metav1.ObjectMeta. ++// ++// Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored. 
++// TODO: Add a BestEffort mode ++type PreserveComments int ++ ++const ( ++ // PreserveCommentsDisable means do not try to preserve comments ++ PreserveCommentsDisable PreserveComments = 1 + iota ++ // PreserveCommentsStrict means try to preserve comments, and fail if it does not work ++ PreserveCommentsStrict ++) ++ ++var _ EncodeOption = PreserveComments(0) ++var _ DecodeOption = PreserveComments(0) ++ ++func (p PreserveComments) ApplyToEncode(target *EncodeOptions) { ++ // TODO: Validate? ++ target.PreserveComments = p ++} ++ ++func (p PreserveComments) ApplyToDecode(target *DecodeOptions) { ++ // TODO: Validate? ++ target.PreserveComments = p ++} ++ ++// Indent JSON encoding output with this many spaces. ++// Use PrettyEncode(false) or JSONIndent(0) to disable pretty output. ++// Only applicable to ContentTypeJSON framers. ++type JSONIndent int32 ++ ++var _ EncodeOption = JSONIndent(0) ++ ++func (i JSONIndent) ApplyToEncode(target *EncodeOptions) { ++ target.JSONIndent = pointer.Int32Ptr(int32(i)) ++} ++ ++// Shorthand for JSONIndent(0) if false, or JSONIndent(2) if true ++type PrettyEncode bool ++ ++var _ EncodeOption = PrettyEncode(false) ++ ++func (pretty PrettyEncode) ApplyToEncode(target *EncodeOptions) { ++ if pretty { ++ JSONIndent(2).ApplyToEncode(target) ++ } else { ++ JSONIndent(0).ApplyToEncode(target) ++ } ++} ++ ++// DECODING ++ ++type DecodeOption interface { ++ ApplyToDecode(*DecodeOptions) ++} ++ ++func defaultDecodeOpts() *DecodeOptions { ++ return &DecodeOptions{ ++ ConvertToHub: pointer.BoolPtr(false), ++ Strict: pointer.BoolPtr(true), ++ Default: pointer.BoolPtr(false), ++ DecodeListElements: pointer.BoolPtr(true), ++ PreserveComments: PreserveCommentsDisable, ++ DecodeUnknown: pointer.BoolPtr(false), ++ } ++} ++ ++type DecodeOptions struct { ++ // Not applicable for Decoder.DecodeInto(). If true, the decoded external object ++ // will be converted into its hub (or internal, where applicable) representation. 
++ // Otherwise, the decoded object will be left in its external representation. ++ // ++ // Default: false ++ ConvertToHub *bool ++ ++ // Parse the YAML/JSON in strict mode, returning a specific error if the input ++ // contains duplicate or unknown fields or formatting errors. ++ // ++ // Default: true ++ Strict *bool ++ ++ // Automatically default the decoded object. ++ // Default: false ++ Default *bool ++ ++ // Only applicable for Decoder.DecodeAll(). If the underlying data contains a v1.List, ++ // the items of the list will be traversed, decoded into their respective types, and ++ // appended to the returned slice. The v1.List will in this case not be returned. ++ // This conversion does NOT support preserving comments. If the given scheme doesn't ++ // recognize the v1.List, before using it will be registered automatically. ++ // ++ // Default: true ++ DecodeListElements *bool ++ ++ // Whether to preserve YAML comments internally. ++ // This only works for objects embedding metav1.ObjectMeta. ++ // ++ // Only applicable to ContentTypeYAML framers. Using any other framer will be silently ignored. ++ // ++ // Usage of this option also requires setting the PreserveComments in EncodeOptions, too. ++ // ++ // Default: PreserveCommentsDisable ++ PreserveComments PreserveComments ++ ++ // DecodeUnknown specifies whether decode objects with an unknown GroupVersionKind into a ++ // *runtime.Unknown object when running Decode(All) (true value) or to return an error when ++ // any unrecognized type is found (false value). 
++ // ++ // Default: false ++ DecodeUnknown *bool ++} ++ ++var _ DecodeOption = &DecodeOptions{} ++ ++func (o *DecodeOptions) ApplyToDecode(target *DecodeOptions) { ++ if o.ConvertToHub != nil { ++ target.ConvertToHub = o.ConvertToHub ++ } ++ if o.Strict != nil { ++ target.Strict = o.Strict ++ } ++ if o.Default != nil { ++ target.Default = o.Default ++ } ++ if o.DecodeListElements != nil { ++ target.DecodeListElements = o.DecodeListElements ++ } ++ if o.PreserveComments != 0 { ++ target.PreserveComments = o.PreserveComments ++ } ++ if o.DecodeUnknown != nil { ++ target.DecodeUnknown = o.DecodeUnknown ++ } ++} ++ ++func (o *DecodeOptions) ApplyOptions(opts []DecodeOption) *DecodeOptions { ++ for _, opt := range opts { ++ opt.ApplyToDecode(o) ++ } ++ // it is guaranteed that all options are non-nil, as defaultDecodeOpts() includes all fields ++ return o ++} ++ ++// Not applicable for Decoder.DecodeInto(). If true, the decoded external object ++// will be converted into its hub (or internal, where applicable) representation. ++// Otherwise, the decoded object will be left in its external representation. ++type ConvertToHub bool ++ ++var _ DecodeOption = ConvertToHub(false) ++ ++func (b ConvertToHub) ApplyToDecode(target *DecodeOptions) { ++ target.ConvertToHub = pointer.BoolPtr(bool(b)) ++} ++ ++// Parse the YAML/JSON in strict mode, returning a specific error if the input ++// contains duplicate or unknown fields or formatting errors. ++type DecodeStrict bool ++ ++var _ DecodeOption = DecodeStrict(false) ++ ++func (b DecodeStrict) ApplyToDecode(target *DecodeOptions) { ++ target.Strict = pointer.BoolPtr(bool(b)) ++} ++ ++// Automatically default the decoded object. ++type DefaultAtDecode bool ++ ++var _ DecodeOption = DefaultAtDecode(false) ++ ++func (b DefaultAtDecode) ApplyToDecode(target *DecodeOptions) { ++ target.Default = pointer.BoolPtr(bool(b)) ++} ++ ++// Only applicable for Decoder.DecodeAll(). 
If the underlying data contains a v1.List, ++// the items of the list will be traversed, decoded into their respective types, and ++// appended to the returned slice. The v1.List will in this case not be returned. ++// This conversion does NOT support preserving comments. If the given scheme doesn't ++// recognize the v1.List, before using it will be registered automatically. ++type DecodeListElements bool ++ ++var _ DecodeOption = DecodeListElements(false) ++ ++func (b DecodeListElements) ApplyToDecode(target *DecodeOptions) { ++ target.DecodeListElements = pointer.BoolPtr(bool(b)) ++} ++ ++// DecodeUnknown specifies whether decode objects with an unknown GroupVersionKind into a ++// *runtime.Unknown object when running Decode(All) (true value) or to return an error when ++// any unrecognized type is found (false value). ++type DecodeUnknown bool ++ ++var _ DecodeOption = DecodeUnknown(false) ++ ++func (b DecodeUnknown) ApplyToDecode(target *DecodeOptions) { ++ target.DecodeUnknown = pointer.BoolPtr(bool(b)) ++} +diff --git a/pkg/serializer/patch.go b/pkg/serializer/patch.go +new file mode 100644 +index 0000000..bd580e0 +--- /dev/null ++++ b/pkg/serializer/patch.go +@@ -0,0 +1,124 @@ ++package serializer ++ ++import ( ++ "bytes" ++ "encoding/json" ++ "errors" ++ ++ "github.com/weaveworks/libgitops/pkg/util/patch" ++ "k8s.io/apimachinery/pkg/runtime" ++ "k8s.io/apimachinery/pkg/util/strategicpatch" ++ openapi "k8s.io/kube-openapi/pkg/util/proto" ++) ++ ++// TODO: Move pkg/util/patch under pkg/serializer? ++ ++type Patcher interface { ++ // ApplyOnStruct applies the given patch (JSON-encoded) using the given BytePatcher ++ // (that knows how to operate on that kind of patch type) into obj. ++ // ++ // obj MUST be a typed object. Unversioned, partial or unstructured objects are not ++ // supported. For those use-cases, convert your object into an unstructured one, and ++ // pass it to ApplyOnUnstructured. ++ // ++ // obj MUST NOT be an internal type. 
If you operate on an internal object as your "hub", ++ // convert the object yourself first to the GroupVersion of the patch bytes, and then ++ // convert back after this call. ++ // ++ // In case the patch would require knowledge about the schema (e.g. StrategicMergePatch), ++ // this function looks that metadata up using reflection of obj. ++ ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Object) error ++ ++ // ApplyOnUnstructured applies the given patch (JSON-encoded) using the given BytePatcher ++ // (that knows how to operate on that kind of patch type) into the unstructured obj. ++ // ++ // If knowledge about the schema is required by the patch type (e.g. StrategicMergePatch), ++ // it is the liability of the caller to provide an OpenAPI schema. ++ ApplyOnUnstructured(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Unstructured, schema openapi.Schema) error ++} ++ ++type patcher struct { ++ *schemeAndCodec ++} ++ ++// ApplyOnStruct applies the given patch (JSON-encoded) using the given BytePatcher ++// (that knows how to operate on that kind of patch type) into obj. ++// ++// obj MUST be a typed object. Unversioned, partial or unstructured objects are not ++// supported. For those use-cases, convert your object into an unstructured one, and ++// pass it to ApplyOnUnstructured. ++// ++// obj MUST NOT be an internal type. If you operate on an internal object as your "hub", ++// convert the object yourself first to the GroupVersion of the patch bytes, and then ++// convert back after this call. ++// ++// In case the patch would require knowledge about the schema (e.g. StrategicMergePatch), ++// this function looks that metadata up using reflection of obj. 
++func (p *patcher) ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Object) error { ++ // Require that obj is typed ++ if !IsTyped(obj, p.scheme) { ++ return errors.New("obj must be typed") ++ } ++ // Get the GVK so we can check if obj is internal ++ gvk, err := GVKForObject(p.scheme, obj) ++ if err != nil { ++ return err ++ } ++ // It must not be internal, as we will encode it soon. ++ if gvk.Version == runtime.APIVersionInternal { ++ return errors.New("obj must not be internal") ++ } ++ ++ // Create a non-pretty encoder ++ encopt := *defaultEncodeOpts().ApplyOptions([]EncodeOption{PrettyEncode(false)}) ++ enc := newEncoder(p.schemeAndCodec, encopt) ++ // Encode without conversion to the buffer ++ var buf bytes.Buffer ++ if err := enc.EncodeForGroupVersion(NewJSONFrameWriter(&buf), obj, gvk.GroupVersion()); err != nil { ++ return err ++ } ++ ++ // Get the schema in case needed by the BytePatcher ++ schema, err := strategicpatch.NewPatchMetaFromStruct(obj) ++ if err != nil { ++ return err ++ } ++ ++ // Apply the patch, and get the new JSON out ++ newJSON, err := bytePatcher.Apply(buf.Bytes(), patch, schema) ++ if err != nil { ++ return err ++ } ++ ++ // Decode into the object to apply the changes ++ fr := NewSingleFrameReader(newJSON, ContentTypeJSON) ++ dec := newDecoder(p.schemeAndCodec, *defaultDecodeOpts()) ++ if err := dec.DecodeInto(fr, obj); err != nil { ++ return err ++ } ++ ++ return nil ++} ++ ++func (p *patcher) ApplyOnUnstructured(bytePatcher patch.BytePatcher, patch []byte, obj runtime.Unstructured, schema openapi.Schema) error { ++ // Marshal the object to form the source JSON ++ sourceJSON, err := json.Marshal(obj) ++ if err != nil { ++ return err ++ } ++ ++ // Conditionally get the schema from the provided OpenAPI spec ++ var patchMeta strategicpatch.LookupPatchMeta ++ if schema != nil { ++ patchMeta = strategicpatch.NewPatchMetaFromOpenAPI(schema) ++ } ++ ++ // Apply the patch, and get the new JSON out ++ newJSON, err := 
bytePatcher.Apply(sourceJSON, patch, patchMeta) ++ if err != nil { ++ return err ++ } ++ ++ // Decode back into obj ++ return json.Unmarshal(newJSON, obj) ++} +diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go +index eb798c9..fbbcdd1 100644 +--- a/pkg/serializer/serializer.go ++++ b/pkg/serializer/serializer.go +@@ -3,6 +3,7 @@ package serializer + import ( + "errors" + "fmt" ++ "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +@@ -22,8 +23,12 @@ const ( + ContentTypeYAML = ContentType(runtime.ContentTypeYAML) + ) + +-// ErrUnsupportedContentType is returned if the specified content type isn't supported +-var ErrUnsupportedContentType = errors.New("unsupported content type") ++var ( ++ // ErrUnsupportedContentType is returned if the specified content type isn't supported ++ ErrUnsupportedContentType = errors.New("unsupported content type") ++ // ErrObjectIsNotList is returned when a runtime.Object was not a List type ++ ErrObjectIsNotList = errors.New("given runtime.Object is not a *List type, or does not implement metav1.ListInterface") ++) + + // ContentTyped is an interface for objects that are specific to a set ContentType. + type ContentTyped interface { +@@ -31,6 +36,8 @@ type ContentTyped interface { + ContentType() ContentType + } + ++func (ct ContentType) ContentType() ContentType { return ct } ++ + // Serializer is an interface providing high-level decoding/encoding functionality + // for types registered in a *runtime.Scheme + type Serializer interface { +@@ -38,13 +45,13 @@ type Serializer interface { + // a FrameWriter. The decoder can be customized by passing some options (e.g. WithDecodingOptions) + // to this call. 
+ // The decoder supports both "classic" API Machinery objects and controller-runtime CRDs +- Decoder(optsFn ...DecodingOptionsFunc) Decoder ++ Decoder(optsFn ...DecodeOption) Decoder + + // Encoder is a high-level interface for encoding Kubernetes API Machinery objects and writing them + // to a FrameWriter. The encoder can be customized by passing some options (e.g. WithEncodingOptions) + // to this call. + // The encoder supports both "classic" API Machinery objects and controller-runtime CRDs +- Encoder(optsFn ...EncodingOptionsFunc) Encoder ++ Encoder(optsFn ...EncodeOption) Encoder + + // Converter is a high-level interface for converting objects between different versions + // The converter supports both "classic" API Machinery objects and controller-runtime CRDs +@@ -53,6 +60,8 @@ type Serializer interface { + // Defaulter is a high-level interface for accessing defaulting functions in a scheme + Defaulter() Defaulter + ++ Patcher() Patcher ++ + // Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to + // the "type universe" and advanced conversion/defaulting features + Scheme() *runtime.Scheme +@@ -63,8 +72,10 @@ type Serializer interface { + } + + type schemeAndCodec struct { +- scheme *runtime.Scheme +- codecs *k8sserializer.CodecFactory ++ // scheme is not thread-safe, hence it is guarded by a mutex ++ scheme *runtime.Scheme ++ schemeMu *sync.Mutex ++ codecs *k8sserializer.CodecFactory + } + + // Encoder is a high-level interface for encoding Kubernetes API Machinery objects and writing them +@@ -186,13 +197,16 @@ func NewSerializer(scheme *runtime.Scheme, codecs *k8sserializer.CodecFactory) S + *codecs = k8sserializer.NewCodecFactory(scheme) + } + ++ schemeCodec := &schemeAndCodec{ ++ scheme: scheme, ++ schemeMu: &sync.Mutex{}, ++ codecs: codecs, ++ } + return &serializer{ +- schemeAndCodec: &schemeAndCodec{ +- scheme: scheme, +- codecs: codecs, +- }, +- converter: newConverter(scheme), +- defaulter: 
newDefaulter(scheme), ++ schemeAndCodec: schemeCodec, ++ converter: newConverter(scheme), ++ defaulter: newDefaulter(scheme), ++ patcher: &patcher{schemeCodec}, + } + } + +@@ -201,6 +215,7 @@ type serializer struct { + *schemeAndCodec + converter *converter + defaulter *defaulter ++ patcher *patcher + } + + // Scheme provides access to the underlying runtime.Scheme, may be used for low-level access to +@@ -215,14 +230,12 @@ func (s *serializer) Codecs() *k8sserializer.CodecFactory { + return s.codecs + } + +-func (s *serializer) Decoder(optFns ...DecodingOptionsFunc) Decoder { +- opts := newDecodeOpts(optFns...) +- return newDecoder(s.schemeAndCodec, *opts) ++func (s *serializer) Decoder(opts ...DecodeOption) Decoder { ++ return newDecoder(s.schemeAndCodec, *defaultDecodeOpts().ApplyOptions(opts)) + } + +-func (s *serializer) Encoder(optFns ...EncodingOptionsFunc) Encoder { +- opts := newEncodeOpts(optFns...) +- return newEncoder(s.schemeAndCodec, *opts) ++func (s *serializer) Encoder(opts ...EncodeOption) Encoder { ++ return newEncoder(s.schemeAndCodec, *defaultEncodeOpts().ApplyOptions(opts)) + } + + func (s *serializer) Converter() Converter { +@@ -233,6 +246,10 @@ func (s *serializer) Defaulter() Defaulter { + return s.defaulter + } + ++func (s *serializer) Patcher() Patcher { ++ return s.patcher ++} ++ + func prioritizedVersionForGroup(scheme *runtime.Scheme, groupName string) (schema.GroupVersion, error) { + // Get the prioritized versions for the given group + gvs := scheme.PrioritizedVersionsForGroup(groupName) +@@ -242,23 +259,3 @@ func prioritizedVersionForGroup(scheme *runtime.Scheme, groupName string) (schem + // Use the first, preferred, (external) version + return gvs[0], nil + } +- +-func GVKForObject(scheme *runtime.Scheme, obj runtime.Object) (schema.GroupVersionKind, error) { +- // If we already have TypeMeta filled in here, just use it +- // TODO: This is probably not needed +- gvk := obj.GetObjectKind().GroupVersionKind() +- if !gvk.Empty() { +- 
return gvk, nil +- } +- +- // TODO: If there are two GVKs returned, it's probably a misconfiguration in the scheme +- // It might be expected though, and we can tolerate setting the GVK manually IFF there are more than +- // one ObjectKind AND the given GVK is one of them. +- +- // Get the possible kinds for the object +- gvks, unversioned, err := scheme.ObjectKinds(obj) +- if unversioned || err != nil || len(gvks) != 1 { +- return schema.GroupVersionKind{}, fmt.Errorf("unversioned %t or err %v or invalid gvks %v", unversioned, err, gvks) +- } +- return gvks[0], nil +-} +diff --git a/pkg/serializer/serializer_test.go b/pkg/serializer/serializer_test.go +index ba23985..c475ec7 100644 +--- a/pkg/serializer/serializer_test.go ++++ b/pkg/serializer/serializer_test.go +@@ -21,8 +21,8 @@ var ( + codecs = k8sserializer.NewCodecFactory(scheme) + ourserializer = NewSerializer(scheme, &codecs) + defaultEncoder = ourserializer.Encoder( +- WithPrettyEncode(false), // TODO: Also test the pretty serializer +- WithCommentsEncode(true), ++ PrettyEncode(false), // TODO: Also test the pretty serializer ++ PreserveCommentsStrict, + ) + + groupname = "foogroup" +@@ -402,8 +402,8 @@ func TestDecode(t *testing.T) { + for _, rt := range tests { + t.Run(rt.name, func(t2 *testing.T) { + obj, actual := ourserializer.Decoder( +- WithDefaultsDecode(rt.doDefaulting), +- WithConvertToHubDecode(rt.doConversion), ++ DefaultAtDecode(rt.doDefaulting), ++ ConvertToHub(rt.doConversion), + ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) + if (actual != nil) != rt.expectedErr { + t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) +@@ -444,7 +444,7 @@ func TestDecodeInto(t *testing.T) { + t.Run(rt.name, func(t2 *testing.T) { + + actual := ourserializer.Decoder( +- WithDefaultsDecode(rt.doDefaulting), ++ DefaultAtDecode(rt.doDefaulting), + ).DecodeInto(NewYAMLFrameReader(FromBytes(rt.data)), rt.obj) + if (actual != nil) != rt.expectedErr { + t2.Errorf("expected error 
%t but actual %t: %v", rt.expectedErr, actual != nil, actual) +@@ -484,8 +484,8 @@ func TestDecodeAll(t *testing.T) { + for _, rt := range tests { + t.Run(rt.name, func(t2 *testing.T) { + objs, actual := ourserializer.Decoder( +- WithDefaultsDecode(rt.doDefaulting), +- WithListElementsDecoding(rt.listSplit), ++ DefaultAtDecode(rt.doDefaulting), ++ DecodeListElements(rt.listSplit), + ).DecodeAll(NewYAMLFrameReader(FromBytes(rt.data))) + if (actual != nil) != rt.expectedErr { + t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) +@@ -527,7 +527,7 @@ func TestDecodeUnknown(t *testing.T) { + for _, rt := range tests { + t.Run(rt.name, func(t2 *testing.T) { + obj, actual := ourserializer.Decoder( +- WithUnknownDecode(rt.unknown), ++ DecodeUnknown(rt.unknown), + ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) + if (actual != nil) != rt.expectedErr { + t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) +@@ -560,9 +560,9 @@ func TestRoundtrip(t *testing.T) { + for _, rt := range tests { + t.Run(rt.name, func(t2 *testing.T) { + obj, err := ourserializer.Decoder( +- WithConvertToHubDecode(true), +- WithCommentsDecode(true), +- WithUnknownDecode(true), ++ ConvertToHub(true), ++ PreserveCommentsStrict, ++ DecodeUnknown(true), + ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) + if err != nil { + t2.Errorf("unexpected decode error: %v", err) +diff --git a/pkg/serializer/utils.go b/pkg/serializer/utils.go +new file mode 100644 +index 0000000..f916a7a +--- /dev/null ++++ b/pkg/serializer/utils.go +@@ -0,0 +1,121 @@ ++package serializer ++ ++import ( ++ "fmt" ++ "strings" ++ ++ "k8s.io/apimachinery/pkg/api/meta" ++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ++ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ++ "k8s.io/apimachinery/pkg/runtime" ++ "k8s.io/apimachinery/pkg/runtime/schema" ++ "sigs.k8s.io/controller-runtime/pkg/client" ++ "sigs.k8s.io/controller-runtime/pkg/client/apiutil" ++) ++ ++func 
GVKForObject(scheme *runtime.Scheme, obj runtime.Object) (schema.GroupVersionKind, error) { ++ // Safety check: one should not do this ++ if obj == nil || obj.GetObjectKind() == nil { ++ return schema.GroupVersionKind{}, fmt.Errorf("GVKForObject: obj or obj.GetObjectKind() must not be nil") ++ } ++ ++ // If this is a runtime.Unknown object, return the GVK stored in TypeMeta ++ if gvk := obj.GetObjectKind().GroupVersionKind(); IsUnknown(obj) && !gvk.Empty() { ++ return gvk, nil ++ } ++ ++ // Special case: Allow objects with two versions to be registered, when the caller is specific ++ // about what version they want populated. ++ // This is needed essentially for working around that there are specific K8s types (structs) ++ // that have been registered with multiple GVKs (e.g. a Deployment struct in both apps & extensions) ++ // TODO: Maybe there is a better way to solve this? Remove unwanted entries from the scheme typeToGVK ++ // map manually? ++ gvks, _, _ := scheme.ObjectKinds(obj) ++ if len(gvks) > 1 { ++ // If we have a configuration with more than one gvk for the same object, ++ // check the set GVK on the object to "choose" the right one, if exists in the list ++ setGVK := obj.GetObjectKind().GroupVersionKind() ++ if !setGVK.Empty() { ++ for _, gvk := range gvks { ++ if EqualsGVK(setGVK, gvk) { ++ return gvk, nil ++ } ++ } ++ } ++ } ++ ++ // TODO: Should we just copy-paste this one, or move it into k8s core to avoid importing controller-runtime ++ // only for this function? ++ return apiutil.GVKForObject(obj, scheme) ++} ++ ++// GVKForList returns the GroupVersionKind for the items in a given List type. ++// In the case of Unstructured or PartialObjectMetadata, it is required that this ++// information is already set in TypeMeta. The "List" suffix is never returned. ++func GVKForList(obj client.ObjectList, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { ++ // First, get the GVK as normal. 
++ gvk, err := GVKForObject(scheme, obj) ++ if err != nil { ++ return schema.GroupVersionKind{}, err ++ } ++ // Make sure this is a list type, i.e. it has the an "Items" field. ++ isList := meta.IsListType(obj) ++ if !isList { ++ return schema.GroupVersionKind{}, ErrObjectIsNotList ++ } ++ // Make sure the returned GVK never ends in List. ++ gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") ++ return gvk, nil ++} ++ ++// EqualsGK returns true if gk1 and gk2 have the same fields. ++func EqualsGK(gk1, gk2 schema.GroupKind) bool { ++ return gk1.Group == gk2.Group && gk1.Kind == gk2.Kind ++} ++ ++// EqualsGVK returns true if gvk1 and gvk2 have the same fields. ++func EqualsGVK(gvk1, gvk2 schema.GroupVersionKind) bool { ++ return EqualsGK(gvk1.GroupKind(), gvk2.GroupKind()) && gvk1.Version == gvk2.Version ++} ++ ++func IsUnknown(obj runtime.Object) bool { ++ _, isUnknown := obj.(*runtime.Unknown) ++ return isUnknown ++} ++ ++func IsPartialObject(obj runtime.Object) bool { ++ _, isPartial := obj.(*metav1.PartialObjectMetadata) ++ return isPartial ++} ++ ++func IsPartialObjectList(obj runtime.Object) bool { ++ _, isPartialList := obj.(*metav1.PartialObjectMetadataList) ++ return isPartialList ++} ++ ++// IsUnstructured checks if obj is runtime.Unstructured ++func IsUnstructured(obj runtime.Object) bool { ++ _, isUnstructured := obj.(runtime.Unstructured) ++ return isUnstructured ++} ++ ++// IsUnstructuredList checks if obj is *unstructured.UnstructuredList ++func IsUnstructuredList(obj runtime.Object) bool { ++ _, isUnstructuredList := obj.(*unstructured.UnstructuredList) ++ return isUnstructuredList ++} ++ ++// IsNonConvertible returns true for unstructured, partial and unknown objects ++// that should not be converted. ++func IsNonConvertible(obj runtime.Object) bool { ++ // TODO: Should Lists also be marked non-convertible? 
++ // IsUnstructured also covers IsUnstructuredList -- *UnstructuredList implements runtime.Unstructured ++ return IsUnstructured(obj) || IsPartialObject(obj) || IsPartialObjectList(obj) || IsUnknown(obj) ++} ++ ++// IsTyped returns true if the object is typed, i.e. registered with the given ++// scheme and not unversioned. ++func IsTyped(obj runtime.Object, scheme *runtime.Scheme) bool { ++ _, isUnversioned, err := scheme.ObjectKinds(obj) ++ return !isUnversioned && err == nil ++} +diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go +new file mode 100644 +index 0000000..39d769e +--- /dev/null ++++ b/pkg/storage/backend/backend.go +@@ -0,0 +1,332 @@ ++package backend ++ ++import ( ++ "bytes" ++ "context" ++ "errors" ++ "fmt" ++ ++ "github.com/weaveworks/libgitops/pkg/serializer" ++ "github.com/weaveworks/libgitops/pkg/storage" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ++ "k8s.io/apimachinery/pkg/runtime" ++ "k8s.io/apimachinery/pkg/util/sets" ++) ++ ++var ( ++ // ErrCannotSaveMetadata is returned if the user tries to save metadata-only objects ++ ErrCannotSaveMetadata = errors.New("cannot save (Create|Update|Patch) *metav1.PartialObjectMetadata") ++ // ErrNameRequired is returned when .metadata.name is unset ++ // TODO: Support generateName? ++ ErrNameRequired = errors.New(".metadata.name is required") ++) ++ ++// TODO: Make a *core.Unknown that has ++// 1. TypeMeta ++// 2. DeepCopies (for Object compatibility), ++// 3. ObjectMeta ++// 4. Spec { Data []byte, ContentType ContentType, Object interface{} } ++// 5. Status { Data []byte, ContentType ContentType, Object interface{} } ++// TODO: Need to make sure we never write this internal struct to disk (MarshalJSON error?) 
++ ++type Accessors interface { ++ Storage() storage.Storage ++ NamespaceEnforcer() NamespaceEnforcer ++ Scheme() *runtime.Scheme ++} ++ ++type WriteAccessors interface { ++ Validator() Validator ++ StorageVersioner() StorageVersioner ++} ++ ++type Reader interface { ++ Accessors ++ ++ Get(ctx context.Context, obj core.Object) error ++ storage.Lister ++} ++ ++type Writer interface { ++ Accessors ++ WriteAccessors ++ ++ Create(ctx context.Context, obj core.Object) error ++ Update(ctx context.Context, obj core.Object) error ++ Delete(ctx context.Context, obj core.Object) error ++} ++ ++type StatusWriter interface { ++ Accessors ++ WriteAccessors ++ ++ UpdateStatus(ctx context.Context, obj core.Object) error ++} ++ ++type Backend interface { ++ Reader ++ Writer ++ StatusWriter ++} ++ ++type ChangeOperation string ++ ++const ( ++ ChangeOperationCreate ChangeOperation = "create" ++ ChangeOperationUpdate ChangeOperation = "update" ++ ChangeOperationDelete ChangeOperation = "delete" ++) ++ ++type Validator interface { ++ ValidateChange(ctx context.Context, backend Reader, op ChangeOperation, obj core.Object) error ++} ++ ++type StorageVersioner interface { ++ // TODO: Do we need the context here? ++ StorageVersion(ctx context.Context, id core.ObjectID) (core.GroupVersion, error) ++} ++ ++func NewGeneric( ++ storage storage.Storage, ++ serializer serializer.Serializer, // TODO: only scheme required, encode/decode optional? ++ enforcer NamespaceEnforcer, ++ validator Validator, // TODO: optional? ++ versioner StorageVersioner, // TODO: optional? ++) (*Generic, error) { ++ if storage == nil { ++ return nil, fmt.Errorf("storage is mandatory") ++ } ++ if serializer == nil { // TODO: relax this to scheme, and add encoder/decoder to opts? 
++ return nil, fmt.Errorf("serializer is mandatory") ++ } ++ if enforcer == nil { ++ return nil, fmt.Errorf("enforcer is mandatory") ++ } ++ // TODO: validate options ++ return &Generic{ ++ scheme: serializer.Scheme(), ++ encoder: serializer.Encoder(), ++ decoder: serializer.Decoder(), ++ ++ storage: storage, ++ enforcer: enforcer, ++ validator: validator, ++ versioner: versioner, ++ }, nil ++} ++ ++var _ Backend = &Generic{} ++ ++type Generic struct { ++ scheme *runtime.Scheme ++ decoder serializer.Decoder ++ encoder serializer.Encoder ++ ++ storage storage.Storage ++ enforcer NamespaceEnforcer ++ validator Validator ++ versioner StorageVersioner ++} ++ ++func (b *Generic) Scheme() *runtime.Scheme { ++ return b.scheme ++} ++ ++func (b *Generic) Storage() storage.Storage { ++ return b.storage ++} ++ ++func (b *Generic) NamespaceEnforcer() NamespaceEnforcer { ++ return b.enforcer ++} ++ ++func (b *Generic) Validator() Validator { ++ return b.validator ++} ++ ++func (b *Generic) StorageVersioner() StorageVersioner { ++ return b.versioner ++} ++ ++func (b *Generic) Get(ctx context.Context, obj core.Object) error { ++ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. ++ id, err := b.idForObj(ctx, obj) ++ if err != nil { ++ return err ++ } ++ // Read the underlying bytes ++ content, err := b.storage.Read(ctx, id) ++ if err != nil { ++ return err ++ } ++ // Get the right content type for the data ++ ct, err := b.storage.ContentType(ctx, id) ++ if err != nil { ++ return err ++ } ++ ++ // TODO: Support various decoding options, e.g. defaulting? ++ // TODO: Does this "replace" already-set fields? ++ return b.decoder.DecodeInto(serializer.NewSingleFrameReader(content, ct), obj) ++} ++ ++// ListNamespaces lists the available namespaces for the given GroupKind. 
++// This function shall only be called for namespaced objects, it is up to ++// the caller to make sure they do not call this method for root-spaced ++// objects; for that the behavior is undefined (but returning an error ++// is recommended). ++func (b *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { ++ return b.storage.ListNamespaces(ctx, gk) ++} ++ ++// ListObjectKeys returns a list of names (with optionally, the namespace). ++// For namespaced GroupKinds, the caller must provide a namespace, and for ++// root-spaced GroupKinds, the caller must not. When namespaced, this function ++// must only return object keys for that given namespace. ++func (b *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { ++ return b.storage.ListObjectIDs(ctx, gk, namespace) ++} ++ ++func (b *Generic) Create(ctx context.Context, obj core.Object) error { ++ // We must never save metadata-only structs ++ if serializer.IsPartialObject(obj) { ++ return ErrCannotSaveMetadata ++ } ++ ++ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. ++ id, err := b.idForObj(ctx, obj) ++ if err != nil { ++ return err ++ } ++ ++ // Do not create it if it already exists ++ if b.storage.Exists(ctx, id) { ++ return core.NewErrAlreadyExists(id) ++ } ++ ++ // Validate that the change is ok ++ // TODO: Don't make "upcasting" possible here ++ if b.validator != nil { ++ if err := b.validator.ValidateChange(ctx, b, ChangeOperationCreate, obj); err != nil { ++ return err ++ } ++ } ++ ++ // Internal, common write shared with Update() ++ return b.write(ctx, id, obj) ++} ++func (b *Generic) Update(ctx context.Context, obj core.Object) error { ++ // We must never save metadata-only structs ++ if serializer.IsPartialObject(obj) { ++ return ErrCannotSaveMetadata ++ } ++ ++ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. 
++ id, err := b.idForObj(ctx, obj) ++ if err != nil { ++ return err ++ } ++ ++ // Require that the object already exists ++ if !b.storage.Exists(ctx, id) { ++ return core.NewErrNotFound(id) ++ } ++ ++ // Validate that the change is ok ++ // TODO: Don't make "upcasting" possible here ++ if b.validator != nil { ++ if err := b.validator.ValidateChange(ctx, b, ChangeOperationUpdate, obj); err != nil { ++ return err ++ } ++ } ++ ++ // Internal, common write shared with Create() ++ return b.write(ctx, id, obj) ++} ++ ++func (b *Generic) UpdateStatus(ctx context.Context, obj core.Object) error { ++ return core.ErrNotImplemented // TODO ++} ++ ++func (b *Generic) write(ctx context.Context, id core.ObjectID, obj core.Object) error { ++ // TODO: Figure out how to get ContentType before the object actually exists! ++ ct, err := b.storage.ContentType(ctx, id) ++ if err != nil { ++ return err ++ } ++ // Resolve the desired storage version ++ /* TODO: re-enable later ++ gv, err := b.versioner.StorageVersion(ctx, id) ++ if err != nil { ++ return err ++ }*/ ++ ++ // Set creationTimestamp if not already populated ++ t := obj.GetCreationTimestamp() ++ if t.IsZero() { ++ obj.SetCreationTimestamp(metav1.Now()) ++ } ++ ++ var objBytes bytes.Buffer ++ // TODO: Work with any ContentType, not just JSON/YAML. Or, make a SingleFrameWriter for any ct. ++ err = b.encoder.Encode(serializer.NewFrameWriter(ct, &objBytes), obj) ++ if err != nil { ++ return err ++ } ++ ++ return b.storage.Write(ctx, id, objBytes.Bytes()) ++} ++ ++func (b *Generic) Delete(ctx context.Context, obj core.Object) error { ++ // Get the versioned ID for the given obj. This might mutate obj wrt namespacing info. 
++ id, err := b.idForObj(ctx, obj) ++ if err != nil { ++ return err ++ } ++ ++ // Verify it did exist ++ if !b.storage.Exists(ctx, id) { ++ return core.NewErrNotFound(id) ++ } ++ ++ // Validate that the change is ok ++ // TODO: Don't make "upcasting" possible here ++ if b.validator != nil { ++ if err := b.validator.ValidateChange(ctx, b, ChangeOperationDelete, obj); err != nil { ++ return err ++ } ++ } ++ ++ // Delete it from the underlying storage ++ return b.storage.Delete(ctx, id) ++} ++ ++// Note: This should also work for unstructured and partial metadata objects ++func (b *Generic) idForObj(ctx context.Context, obj core.Object) (core.ObjectID, error) { ++ gvk, err := serializer.GVKForObject(b.scheme, obj) ++ if err != nil { ++ return nil, err ++ } ++ ++ // Object must always have .metadata.name set ++ if len(obj.GetName()) == 0 { ++ return nil, ErrNameRequired ++ } ++ ++ // Enforce the given namespace policy. This might mutate obj. ++ // TODO: disallow "upcasting" the Lister to a full-blown Storage? ++ if err := b.enforcer.EnforceNamespace( ++ ctx, ++ obj, ++ gvk, ++ b.Storage().Namespacer(), ++ b.Storage(), ++ ); err != nil { ++ return nil, err ++ } ++ ++ // At this point we know name is non-empty, and the namespace field is correct, ++ // according to policy ++ return core.NewObjectID(gvk, core.ObjectKeyFromObject(obj)), nil ++} +diff --git a/pkg/storage/backend/enforcer.go b/pkg/storage/backend/enforcer.go +new file mode 100644 +index 0000000..8553283 +--- /dev/null ++++ b/pkg/storage/backend/enforcer.go +@@ -0,0 +1,116 @@ ++package backend ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ ++ "github.com/weaveworks/libgitops/pkg/storage" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++) ++ ++var ( ++ // ErrNoSuchNamespace means that the set of namespaces was searched in the ++ // system, but the requested namespace wasn't in that list. 
++ ErrNoSuchNamespace = errors.New("no such namespace in the system") ++) ++ ++// NamespaceEnforcer enforces a namespace policy for the Backend. ++type NamespaceEnforcer interface { ++ // EnforceNamespace makes sure that: ++ // a) Any namespaced object has a non-empty namespace field after this call ++ // b) Any non-namespaced object has an empty namespace field after this call ++ // c) The applicable namespace policy of the user's liking is enforced (e.g. ++ // that there are only certain valid namespaces that can be used). ++ // ++ // This call is allowed to mutate obj. gvk represents the GroupVersionKind ++ // of obj. The namespacer can be used to figure out if the given object is ++ // namespaced or not. The given lister might be used to list object IDs, ++ // or existing namespaces in the system. ++ // ++ // See GenericNamespaceEnforcer for an example implementation, or ++ // pkg/storage/kube.NewNamespaceEnforcer() for a sample application. ++ EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer core.Namespacer, lister storage.Lister) error ++} ++ ++// GenericNamespaceEnforcer is a NamespaceEnforcer that: ++// a) sets a default namespace for namespaced objects that have ++// the namespace field left empty ++// b) makes sure non-namespaced objects do not have the namespace ++// field set, by pruning any previously-set value. ++// c) if NamespaceGroupKind is non-nil; lists valid Namespace objects ++// in the system (of the given GroupKind); and matches namespaced ++// objects' namespace field against the listed Namespace objects' ++// .metadata.name field. ++// ++// For an example of how to configure this enforcer in the way ++// Kubernetes itself (approximately) does, see pkg/storage/kube. ++// NewNamespaceEnforcer(). ++type GenericNamespaceEnforcer struct { ++ // DefaultNamespace describes the default namespace string ++ // that should be set, if a namespaced object's namespace ++ // field is empty. 
++ // +required ++ DefaultNamespace string ++ // NamespaceGroupKind describes the GroupKind for Namespace ++ // objects in the system. If non-nil, objects with such ++ // GroupKind are listed, and their .metadata.name is matched ++ // against the current object's namespace field. If nil, any ++ // namespace value is considered valid. ++ // +optional ++ NamespaceGroupKind *core.GroupKind ++} ++ ++func (e GenericNamespaceEnforcer) EnforceNamespace(ctx context.Context, obj core.Object, gvk core.GroupVersionKind, namespacer core.Namespacer, lister storage.Lister) error { ++ // Get namespacing info ++ namespaced, err := namespacer.IsNamespaced(gvk.GroupKind()) ++ if err != nil { ++ return err ++ } ++ ++ // Enforce generic rules ++ ns := obj.GetNamespace() ++ if !namespaced { ++ // If a namespace was set, it must be sanitized, as non-namespaced ++ // resources must have namespace field empty. ++ if len(ns) != 0 { ++ obj.SetNamespace("") ++ } ++ return nil ++ } ++ // The resource is namespaced. ++ // If it is empty, set it to the default namespace. ++ if len(ns) == 0 { ++ // Verify that DefaultNamespace is non-empty ++ if len(e.DefaultNamespace) == 0 { ++ return fmt.Errorf("GenericNamespaceEnforcer.DefaultNamespace is mandatory: %w", core.ErrInvalidParameter) ++ } ++ // Mutate obj and set the namespace field to the default, then return ++ obj.SetNamespace(e.DefaultNamespace) ++ return nil ++ } ++ ++ // If the namespace field is set, but NamespaceGroupKind is ++ // nil, it means that any non-empty namespace value is ++ // valid. ++ if e.NamespaceGroupKind == nil { ++ return nil ++ } ++ ++ // However, if a Namespace GroupKind was given, look it up using ++ // the lister, and verify its .metadata.name matches the given ++ // namespace value. 
++ objIDs, err := lister.ListObjectIDs(ctx, *e.NamespaceGroupKind, "") ++ if err != nil { ++ return err ++ } ++ // Loop through the IDs, and try to match it against the set ns ++ for _, id := range objIDs { ++ if id.ObjectKey().Name == ns { ++ // Found the namespace; this is a valid setting ++ return nil ++ } ++ } ++ // The set namespace doesn't belong to the set of valid namespaces, error ++ return fmt.Errorf("%w: %q", ErrNoSuchNamespace, ns) ++} +diff --git a/pkg/storage/cache/cache.go b/pkg/storage/cache/cache.go +deleted file mode 100644 +index 11a4991..0000000 +--- a/pkg/storage/cache/cache.go ++++ /dev/null +@@ -1,197 +0,0 @@ +-package cache +- +-/* +- +-TODO: Revisit if we need this file/package in the future. +- +-import ( +- log "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/serializer" +- "github.com/weaveworks/libgitops/pkg/storage" +- "k8s.io/apimachinery/pkg/runtime/schema" +-) +- +-// Cache is an intermediate caching layer, which conforms to Storage +-// Typically you back the cache with an actual storage +-type Cache interface { +- storage.Storage +- // Flush is used to write the state of the entire cache to storage +- // Warning: this is a very expensive operation +- Flush() error +-} +- +-type cache struct { +- // storage is the backing Storage for the cache +- // used to look up non-cached Objects +- storage storage.Storage +- +- // index caches the Objects by GroupVersionKind and UID +- // This guarantees uniqueness when looking up a specific Object +- index *index +-} +- +-var _ Cache = &cache{} +- +-func NewCache(backingStorage storage.Storage) Cache { +- c := &cache{ +- storage: backingStorage, +- index: newIndex(backingStorage), +- } +- +- return c +-} +- +-func (s *cache) Serializer() serializer.Serializer { +- return s.storage.Serializer() +-} +- +-func (c *cache) New(gvk schema.GroupVersionKind) (runtime.Object, error) { +- // Request the storage to create the Object. 
The +- // newly generated Object has not got an UID which +- // is required for indexing, so just return it +- // without storing it into the cache +- return c.storage.New(gvk) +-} +- +-func (c *cache) Get(gvk schema.GroupVersionKind, uid runtime.UID) (obj runtime.Object, err error) { +- log.Tracef("cache: Get %s with UID %q", gvk.Kind, uid) +- +- // If the requested Object resides in the cache, return it +- if obj, err = c.index.loadByID(gvk, uid); err != nil || obj != nil { +- return +- } +- +- // Request the Object from the storage +- obj, err = c.storage.Get(gvk, uid) +- +- // If no errors occurred, cache it +- if err == nil { +- err = c.index.store(obj) +- } +- +- return +-} +- +-func (c *cache) GetMeta(gvk schema.GroupVersionKind, uid runtime.UID) (obj runtime.Object, err error) { +- log.Tracef("cache: GetMeta %s with UID %q", gvk.Kind, uid) +- +- obj, err = c.storage.GetMeta(gvk, uid) +- +- // If no errors occurred while loading, store the Object in the cache +- if err == nil { +- err = c.index.storeMeta(obj) +- } +- +- return +-} +- +-func (c *cache) Set(gvk schema.GroupVersionKind, obj runtime.Object) error { +- log.Tracef("cache: Set %s with UID %q", gvk.Kind, obj.GetUID()) +- +- // Store the changed Object in the cache +- if err := c.index.store(obj); err != nil { +- return err +- } +- +- // TODO: For now the cache always flushes, we might add automatic flushing later +- return c.storage.Set(gvk, obj) +-} +- +-func (c *cache) Patch(gvk schema.GroupVersionKind, uid runtime.UID, patch []byte) error { +- // TODO: For now patches are always flushed, the cache will load the updated Object on-demand on access +- return c.storage.Patch(gvk, uid, patch) +-} +- +-func (c *cache) Delete(gvk schema.GroupVersionKind, uid runtime.UID) error { +- log.Tracef("cache: Delete %s with UID %q", gvk.Kind, uid) +- +- // Delete the given Object from the cache and storage +- c.index.delete(gvk, uid) +- return c.storage.Delete(gvk, uid) +-} +- +-type listFunc func(gvk 
schema.GroupVersionKind) ([]runtime.Object, error) +-type cacheStoreFunc func([]runtime.Object) error +- +-// list is a common handler for List and ListMeta +-func (c *cache) list(gvk schema.GroupVersionKind, slf, clf listFunc, csf cacheStoreFunc) (objs []runtime.Object, err error) { +- var storageCount uint64 +- if storageCount, err = c.storage.Count(gvk); err != nil { +- return +- } +- +- if c.index.count(gvk) != storageCount { +- log.Tracef("cache: miss when listing: %s", gvk) +- // If the cache doesn't track all of the Objects, request them from the storage +- if objs, err = slf(gvk); err != nil { +- // If no errors occurred, store the Objects in the cache +- err = csf(objs) +- } +- } else { +- log.Tracef("cache: hit when listing: %s", gvk) +- // If the cache tracks everything, return the cache's contents +- objs, err = clf(gvk) +- } +- +- return +-} +- +-func (c *cache) List(gvk schema.GroupVersionKind) ([]runtime.Object, error) { +- return c.list(gvk, c.storage.List, c.index.list, c.index.storeAll) +-} +- +-func (c *cache) ListMeta(gvk schema.GroupVersionKind) ([]runtime.Object, error) { +- return c.list(gvk, c.storage.ListMeta, c.index.listMeta, c.index.storeAllMeta) +-} +- +-func (c *cache) Count(gvk schema.GroupVersionKind) (uint64, error) { +- // The cache is transparent about how many items it has cached +- return c.storage.Count(gvk) +-} +- +-func (c *cache) Checksum(gvk schema.GroupVersionKind, uid runtime.UID) (string, error) { +- // The cache is transparent about the checksums +- return c.storage.Checksum(gvk, uid) +-} +- +-func (c *cache) RawStorage() storage.RawStorage { +- return c.storage.RawStorage() +-} +- +-func (c *cache) Close() error { +- return c.storage.Close() +-} +- +-func (c *cache) Flush() error { +- // Load the entire cache +- allObjects, err := c.index.loadAll() +- if err != nil { +- return err +- } +- +- for _, obj := range allObjects { +- // Request the storage to save each Object +- if err := c.storage.Set(obj); err != nil { +- 
return err +- } +- } +- +- return nil +-} +- +-// PartialObjectFrom is used to create a bound PartialObjectImpl from an Object. +-// Note: This might be useful later (maybe here or maybe in pkg/runtime) if re-enable the cache +-func PartialObjectFrom(obj Object) (PartialObject, error) { +- tm, ok := obj.GetObjectKind().(*metav1.TypeMeta) +- if !ok { +- return nil, fmt.Errorf("PartialObjectFrom: Cannot cast obj to *metav1.TypeMeta, is %T", obj.GetObjectKind()) +- } +- om, ok := obj.GetObjectMeta().(*metav1.ObjectMeta) +- if !ok { +- return nil, fmt.Errorf("PartialObjectFrom: Cannot cast obj to *metav1.ObjectMeta, is %T", obj.GetObjectMeta()) +- } +- return &PartialObjectImpl{tm, om}, nil +-} +- +-*/ +diff --git a/pkg/storage/cache/index.go b/pkg/storage/cache/index.go +deleted file mode 100644 +index 326014f..0000000 +--- a/pkg/storage/cache/index.go ++++ /dev/null +@@ -1,156 +0,0 @@ +-package cache +- +-/* +- +-TODO: Revisit if we need this file/package in the future. +- +-import ( +- log "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/storage" +- "k8s.io/apimachinery/pkg/runtime/schema" +-) +- +-type index struct { +- storage storage.Storage +- objects map[schema.GroupVersionKind]map[runtime.UID]*cacheObject +-} +- +-func newIndex(storage storage.Storage) *index { +- return &index{ +- storage: storage, +- objects: make(map[schema.GroupVersionKind]map[runtime.UID]*cacheObject), +- } +-} +- +-func (i *index) loadByID(gvk schema.GroupVersionKind, uid runtime.UID) (runtime.Object, error) { +- if uids, ok := i.objects[gvk]; ok { +- if obj, ok := uids[uid]; ok { +- log.Tracef("index: cache hit for %s with UID %q", gvk.Kind, uid) +- return obj.loadFull() +- } +- } +- +- log.Tracef("index: cache miss for %s with UID %q", gvk.Kind, uid) +- return nil, nil +-} +- +-func (i *index) loadAll() ([]runtime.Object, error) { +- var size uint64 +- +- for gvk := range i.objects { +- size += i.count(gvk) +- } +- +- 
all := make([]runtime.Object, 0, size) +- +- for gvk := range i.objects { +- if objects, err := i.list(gvk); err == nil { +- all = append(all, objects...) +- } else { +- return nil, err +- } +- } +- +- return all, nil +-} +- +-func store(i *index, obj runtime.Object, apiType bool) error { +- // If store is called for an invalid Object lacking an UID, +- // panic and print the stack trace. This should never happen. +- if obj.GetUID() == "" { +- panic("Attempt to cache invalid Object: missing UID") +- } +- +- co, err := newCacheObject(i.storage, obj, apiType) +- if err != nil { +- return err +- } +- +- gvk := co.object.GetObjectKind().GroupVersionKind() +- +- if _, ok := i.objects[gvk]; !ok { +- i.objects[gvk] = make(map[runtime.UID]*cacheObject) +- } +- +- log.Tracef("index: storing %s object with UID %q, meta: %t", gvk.Kind, obj.GetName(), apiType) +- i.objects[gvk][co.object.GetUID()] = co +- +- return nil +-} +- +-func (i *index) store(obj runtime.Object) error { +- return store(i, obj, false) +-} +- +-func (i *index) storeAll(objs []runtime.Object) (err error) { +- for _, obj := range objs { +- if err = i.store(obj); err != nil { +- break +- } +- } +- +- return +-} +- +-func (i *index) storeMeta(obj runtime.Object) error { +- return store(i, obj, true) +-} +- +-func (i *index) storeAllMeta(objs []runtime.Object) (err error) { +- for _, obj := range objs { +- if uids, ok := i.objects[obj.GetObjectKind().GroupVersionKind()]; ok { +- if _, ok := uids[obj.GetUID()]; ok { +- continue +- } +- } +- +- if err = i.storeMeta(obj); err != nil { +- break +- } +- } +- +- return +-} +- +-func (i *index) delete(gvk schema.GroupVersionKind, uid runtime.UID) { +- if uids, ok := i.objects[gvk]; ok { +- delete(uids, uid) +- } +-} +- +-func (i *index) count(gvk schema.GroupVersionKind) (count uint64) { +- count = uint64(len(i.objects[gvk])) +- log.Tracef("index: counted %d %s object(s)", count, gvk.Kind) +- return +-} +- +-func list(i *index, gvk schema.GroupVersionKind, apiTypes 
bool) ([]runtime.Object, error) { +- uids := i.objects[gvk] +- list := make([]runtime.Object, 0, len(uids)) +- +- log.Tracef("index: listing %s objects, meta: %t", gvk, apiTypes) +- for _, obj := range uids { +- loadFunc := obj.loadFull +- if apiTypes { +- loadFunc = obj.loadAPI +- } +- +- if result, err := loadFunc(); err != nil { +- return nil, err +- } else { +- list = append(list, result) +- } +- } +- +- return list, nil +-} +- +-func (i *index) list(gvk schema.GroupVersionKind) ([]runtime.Object, error) { +- return list(i, gvk, false) +-} +- +-func (i *index) listMeta(gvk schema.GroupVersionKind) ([]runtime.Object, error) { +- return list(i, gvk, true) +-} +-*/ +diff --git a/pkg/storage/cache/object.go b/pkg/storage/cache/object.go +deleted file mode 100644 +index c0e807c..0000000 +--- a/pkg/storage/cache/object.go ++++ /dev/null +@@ -1,96 +0,0 @@ +-package cache +- +-/* +- +-TODO: Revisit if we need this file/package in the future. +- +-import ( +- log "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/storage" +-) +- +-type cacheObject struct { +- storage storage.Storage +- object runtime.Object +- checksum string +- apiType bool +-} +- +-func newCacheObject(s storage.Storage, object runtime.Object, apiType bool) (c *cacheObject, err error) { +- c = &cacheObject{ +- storage: s, +- object: object, +- apiType: apiType, +- } +- +- if c.checksum, err = s.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil { +- c = nil +- } +- +- return +-} +- +-// loadFull returns the full Object, loading it only if it hasn't been cached before or the checksum has changed +-func (c *cacheObject) loadFull() (runtime.Object, error) { +- var checksum string +- reload := c.apiType +- +- if !reload { +- if chk, err := c.storage.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil { +- return nil, err +- } else if chk != c.checksum { +- log.Tracef("cacheObject: %q invalidated, checksum 
mismatch: %q -> %q", c.object.GetName(), c.checksum, chk) +- checksum = chk +- reload = true +- } else { +- log.Tracef("cacheObject: %q checksum: %q", c.object.GetName(), c.checksum) +- } +- } +- +- if reload { +- log.Tracef("cacheObject: full load triggered for %q", c.object.GetName()) +- obj, err := c.storage.Get(c.object.GroupVersionKind(), c.object.GetUID()) +- if err != nil { +- return nil, err +- } +- +- // Only apply the change after a successful Get +- c.object = obj +- c.apiType = false +- +- if len(checksum) > 0 { +- c.checksum = checksum +- } +- } +- +- return c.object, nil +-} +- +-// loadAPI returns the APIType of the Object, loading it only if the checksum has changed +-func (c *cacheObject) loadAPI() (runtime.Object, error) { +- if chk, err := c.storage.Checksum(c.object.GroupVersionKind(), c.object.GetUID()); err != nil { +- return nil, err +- } else if chk != c.checksum { +- log.Tracef("cacheObject: %q invalidated, checksum mismatch: %q -> %q", c.object.GetName(), c.checksum, chk) +- log.Tracef("cacheObject: API load triggered for %q", c.object.GetName()) +- obj, err := c.storage.GetMeta(c.object.GroupVersionKind(), c.object.GetUID()) +- if err != nil { +- return nil, err +- } +- +- // Only apply the change after a successful GetMeta +- c.object = obj +- c.checksum = chk +- c.apiType = true +- } else { +- log.Tracef("cacheObject: %q checksum: %q", c.object.GetName(), c.checksum) +- } +- +- if c.apiType { +- return c.object, nil +- } +- +- return runtime.PartialObjectFrom(c.object), nil +-} +-*/ +diff --git a/pkg/storage/client/client.go b/pkg/storage/client/client.go +new file mode 100644 +index 0000000..9c216a0 +--- /dev/null ++++ b/pkg/storage/client/client.go +@@ -0,0 +1,315 @@ ++package client ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ ++ "github.com/weaveworks/libgitops/pkg/filter" ++ "github.com/weaveworks/libgitops/pkg/serializer" ++ "github.com/weaveworks/libgitops/pkg/storage/backend" ++ 
"github.com/weaveworks/libgitops/pkg/storage/core" ++ patchutil "github.com/weaveworks/libgitops/pkg/util/patch" ++ syncutil "github.com/weaveworks/libgitops/pkg/util/sync" ++ "k8s.io/apimachinery/pkg/api/meta" ++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ++ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ++ kruntime "k8s.io/apimachinery/pkg/runtime" ++ utilerrs "k8s.io/apimachinery/pkg/util/errors" ++ "k8s.io/apimachinery/pkg/util/sets" ++ "sigs.k8s.io/controller-runtime/pkg/client" ++) ++ ++// TODO: Pass an ObjectID that contains all PartialObjectMetadata info for "downstream" consumers ++// that can make use of it by "casting up". ++ ++var ( ++ // ErrUnsupportedPatchType is returned when an unsupported patch type is used ++ ErrUnsupportedPatchType = errors.New("unsupported patch type") ++) ++ ++type Reader interface { ++ client.Reader ++ BackendReader() backend.Reader ++} ++ ++type Writer interface { ++ client.Writer ++ BackendWriter() backend.Writer ++} ++ ++type StatusClient interface { ++ client.StatusClient ++ BackendStatusWriter() backend.StatusWriter ++} ++ ++// Client is an interface for persisting and retrieving API objects to/from a backend ++// One Client instance handles all different Kinds of Objects ++type Client interface { ++ Reader ++ Writer ++ // TODO: StatusClient ++ //client.Client ++} ++ ++// NewGeneric constructs a new Generic client ++// TODO: Construct the default patcher from the given scheme, make patcher an opt instead ++func NewGeneric(backend backend.Backend, patcher serializer.Patcher) (*Generic, error) { ++ if backend == nil { ++ return nil, fmt.Errorf("backend is mandatory") ++ } ++ return &Generic{backend, patcher}, nil ++} ++ ++// Generic implements the Client interface ++type Generic struct { ++ backend backend.Backend ++ patcher serializer.Patcher ++} ++ ++var _ Client = &Generic{} ++ ++func (c *Generic) Backend() backend.Backend { return c.backend } ++func (c *Generic) BackendReader() backend.Reader { return c.backend 
} ++func (c *Generic) BackendWriter() backend.Writer { return c.backend } ++ ++// Get returns a new Object for the resource at the specified kind/uid path, based on the file content. ++// In order to only extract the metadata of this object, pass in a *metav1.PartialObjectMetadata ++func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { ++ obj.SetName(key.Name) ++ obj.SetNamespace(key.Namespace) ++ ++ return c.backend.Get(ctx, obj) ++} ++ ++// List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package ++// for more information, e.g. filter.NameFilter{} and filter.UIDFilter{}) ++// You can also pass in an *unstructured.UnstructuredList to get an unknown type's data or ++// *metav1.PartialObjectMetadataList to just get the metadata of all objects of the specified gvk. ++// If you do specify either an *unstructured.UnstructuredList or *metav1.PartialObjectMetadataList, ++// you need to populate TypeMeta with the GVK you want back. ++// TODO: Check if this works with metav1.List{} ++// TODO: Create constructors for the different kinds of lists? ++func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...client.ListOption) error { ++ // This call will verify that list actually is a List type. ++ gvk, err := serializer.GVKForList(list, c.Backend().Scheme()) ++ if err != nil { ++ return err ++ } ++ // This applies both upstream and custom options ++ listOpts := (&ListOptions{}).ApplyOptions(opts) ++ ++ // Get namespacing info ++ gk := gvk.GroupKind() ++ namespaced, err := c.Backend().Storage().Namespacer().IsNamespaced(gk) ++ if err != nil { ++ return err ++ } ++ ++ // By default, only search the given namespace. 
It is fully valid for this to be an ++ // empty string: it is the only ++ namespaces := sets.NewString(listOpts.Namespace) ++ // However, if the GroupKind is namespaced, and the given "filter namespace" in list ++ // options is empty, it means that one should list all namespaces ++ if namespaced && listOpts.Namespace == "" { ++ namespaces, err = c.Backend().ListNamespaces(ctx, gk) ++ if err != nil { ++ return err ++ } ++ } else if !namespaced && listOpts.Namespace != "" { ++ return errors.New("invalid namespace option: cannot filter namespace for root-spaced object") ++ } ++ ++ allIDs := []core.UnversionedObjectID{} ++ for ns := range namespaces { ++ ids, err := c.Backend().ListObjectIDs(ctx, gk, ns) ++ if err != nil { ++ return err ++ } ++ allIDs = append(allIDs, ids...) ++ } ++ ++ // Populate objs through the given (non-buffered) channel ++ ch := make(chan core.Object) ++ objs := make([]kruntime.Object, 0, len(allIDs)) ++ ++ // How should the object be created? ++ createFunc := createObject(gvk, c.Backend().Scheme()) ++ if serializer.IsPartialObjectList(list) { ++ createFunc = createPartialObject(gvk) ++ } else if serializer.IsUnstructuredList(list) { ++ createFunc = createUnstructuredObject(gvk) ++ } ++ // Temporary processing goroutine; execution starts instantly ++ m := syncutil.RunMonitor(func() error { ++ return c.processKeys(ctx, allIDs, &listOpts.FilterOptions, createFunc, ch) ++ }) ++ ++ for o := range ch { ++ objs = append(objs, o) ++ } ++ ++ if err := m.Wait(); err != nil { ++ return err ++ } ++ ++ // Populate the List's Items field with the objects returned ++ return meta.SetList(list, objs) ++} ++ ++func (c *Generic) Create(ctx context.Context, obj core.Object, _ ...client.CreateOption) error { ++ return c.backend.Create(ctx, obj) ++} ++ ++func (c *Generic) Update(ctx context.Context, obj core.Object, _ ...client.UpdateOption) error { ++ return c.backend.Update(ctx, obj) ++} ++ ++// Patch performs a strategic merge patch on the object with the given 
UID, using the byte-encoded patch given ++func (c *Generic) Patch(ctx context.Context, obj core.Object, patch core.Patch, _ ...client.PatchOption) error { ++ // Fail-fast: We must never save metadata-only structs ++ if serializer.IsPartialObject(obj) { ++ return backend.ErrCannotSaveMetadata ++ } ++ ++ // Acquire the patch data from the "desired state" object given now, i.e. in MergeFrom{} ++ // TODO: Shall we require GVK to be present here using a meta interpreter? ++ patchJSON, err := patch.Data(obj) ++ if err != nil { ++ return err ++ } ++ ++ // Load the current latest state into obj temporarily, before patching it ++ // This also validates the GVK, name and namespace. ++ if err := c.backend.Get(ctx, obj); err != nil { ++ return err ++ } ++ ++ // Get the right BytePatcher for this patch type ++ // TODO: Make this return an error ++ bytePatcher := patchutil.BytePatcherForType(patch.Type()) ++ if bytePatcher == nil { ++ return fmt.Errorf("patch type not supported: %s", patch.Type()) ++ } ++ ++ // Apply the patch into the object using the given byte patcher ++ if unstruct, ok := obj.(kruntime.Unstructured); ok { ++ // TODO: Provide an option for the schema ++ err = c.patcher.ApplyOnUnstructured(bytePatcher, patchJSON, unstruct, nil) ++ } else { ++ err = c.patcher.ApplyOnStruct(bytePatcher, patchJSON, obj) ++ } ++ if err != nil { ++ return err ++ } ++ ++ // Perform an update internally, similar to what .Update would yield ++ // TODO: Maybe write to the Storage conditionally? using DryRun all ++ return c.Update(ctx, obj) ++} ++ ++// Delete removes an Object from the backend ++// PartialObjectMetadata should work here. ++func (c *Generic) Delete(ctx context.Context, obj core.Object, _ ...client.DeleteOption) error { ++ return c.backend.Delete(ctx, obj) ++} ++ ++// DeleteAllOf deletes all matched resources by first doing a List() operation on the given GVK of ++// obj (obj is not used for anything else) and the given filters in opts. 
Only the Partial Meta ++func (c *Generic) DeleteAllOf(ctx context.Context, obj core.Object, opts ...client.DeleteAllOfOption) error { ++ // This applies both upstream and custom options, and propagates the options correctly to both ++ // List() and Delete() ++ customDeleteAllOpts := (&DeleteAllOfOptions{}).ApplyOptions(opts) ++ ++ // Get the GVK of the object ++ gvk, err := serializer.GVKForObject(c.Backend().Scheme(), obj) ++ if err != nil { ++ return err ++ } ++ ++ // List all matched objects for the given ListOptions, and GVK. ++ // UnstructuredList is used here so that we can use filters that operate on fields ++ list := &unstructured.UnstructuredList{} ++ list.SetGroupVersionKind(gvk) ++ if err := c.List(ctx, list, customDeleteAllOpts); err != nil { ++ return err ++ } ++ ++ // Loop through all of the matched items, and Delete them one-by-one ++ for i := range list.Items { ++ if err := c.Delete(ctx, &list.Items[i], customDeleteAllOpts); err != nil { ++ return err ++ } ++ } ++ return nil ++} ++ ++// Scheme returns the scheme this client is using. ++func (c *Generic) Scheme() *kruntime.Scheme { ++ return c.backend.Scheme() ++} ++ ++// RESTMapper returns the rest this client is using. For now, this returns nil, so don't use. 
++func (c *Generic) RESTMapper() meta.RESTMapper { ++ return nil ++} ++ ++type newObjectFunc func() (core.Object, error) ++ ++func createObject(gvk core.GroupVersionKind, scheme *kruntime.Scheme) newObjectFunc { ++ return func() (core.Object, error) { ++ return NewObjectForGVK(gvk, scheme) ++ } ++} ++ ++func createPartialObject(gvk core.GroupVersionKind) newObjectFunc { ++ return func() (core.Object, error) { ++ obj := &metav1.PartialObjectMetadata{} ++ obj.SetGroupVersionKind(gvk) ++ return obj, nil ++ } ++} ++ ++func createUnstructuredObject(gvk core.GroupVersionKind) newObjectFunc { ++ return func() (core.Object, error) { ++ obj := &unstructured.Unstructured{} ++ obj.SetGroupVersionKind(gvk) ++ return obj, nil ++ } ++} ++ ++func (c *Generic) processKeys(ctx context.Context, ids []core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) error { ++ goroutines := []func() error{} ++ for _, id := range ids { ++ goroutines = append(goroutines, c.processKey(ctx, id, filterOpts, fn, output)) ++ } ++ ++ defer close(output) ++ ++ return utilerrs.AggregateGoroutines(goroutines...) 
++} ++ ++func (c *Generic) processKey(ctx context.Context, id core.UnversionedObjectID, filterOpts *filter.FilterOptions, fn newObjectFunc, output chan core.Object) func() error { ++ return func() error { ++ // Create a new object, and decode into it using Get ++ obj, err := fn() ++ if err != nil { ++ return err ++ } ++ ++ if err := c.Get(ctx, id.ObjectKey(), obj); err != nil { ++ return err ++ } ++ ++ // Match the object against the filters ++ matched, err := filterOpts.Match(obj) ++ if err != nil { ++ return err ++ } ++ if matched { ++ output <- obj ++ } ++ ++ return nil ++ } ++} +diff --git a/pkg/storage/client/options.go b/pkg/storage/client/options.go +new file mode 100644 +index 0000000..7fa8f8e +--- /dev/null ++++ b/pkg/storage/client/options.go +@@ -0,0 +1,75 @@ ++package client ++ ++import ( ++ "github.com/weaveworks/libgitops/pkg/filter" ++ "sigs.k8s.io/controller-runtime/pkg/client" ++) ++ ++type ListOption interface { ++ client.ListOption ++ filter.FilterOption ++} ++ ++type ListOptions struct { ++ client.ListOptions ++ filter.FilterOptions ++} ++ ++var _ ListOption = &ListOptions{} ++ ++func (o *ListOptions) ApplyToList(target *client.ListOptions) { ++ o.ListOptions.ApplyToList(target) ++} ++ ++func (o *ListOptions) ApplyToFilterOptions(target *filter.FilterOptions) { ++ o.FilterOptions.ApplyToFilterOptions(target) ++} ++ ++func (o *ListOptions) ApplyOptions(opts []client.ListOption) *ListOptions { ++ // Apply the "normal" ListOptions ++ o.ListOptions.ApplyOptions(opts) ++ // Apply all FilterOptions, if they implement that interface ++ for _, opt := range opts { ++ o.FilterOptions.ApplyOption(opt) ++ } ++ ++ // If listOpts.Namespace was given, add it to the list of ObjectFilters ++ if len(o.Namespace) != 0 { ++ o.ObjectFilters = append(o.ObjectFilters, filter.NamespaceFilter{Namespace: o.Namespace}) ++ } ++ // If listOpts.LabelSelector was given, add it to the list of ObjectFilters ++ if o.LabelSelector != nil { ++ o.ObjectFilters = 
append(o.ObjectFilters, filter.LabelsFilter{LabelSelector: o.LabelSelector}) ++ } ++ ++ return o ++} ++ ++type DeleteAllOfOption interface { ++ ListOption ++ client.DeleteAllOfOption ++} ++ ++type DeleteAllOfOptions struct { ++ ListOptions ++ client.DeleteOptions ++} ++ ++var _ DeleteAllOfOption = &DeleteAllOfOptions{} ++ ++func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(target *client.DeleteAllOfOptions) { ++ o.DeleteOptions.ApplyToDelete(&target.DeleteOptions) ++} ++ ++func (o *DeleteAllOfOptions) ApplyOptions(opts []client.DeleteAllOfOption) *DeleteAllOfOptions { ++ // Cannot directly apply to o, hence, create a temporary object to which upstream opts are applied ++ do := (&client.DeleteAllOfOptions{}).ApplyOptions(opts) ++ o.ListOptions.ListOptions = do.ListOptions ++ o.DeleteOptions = do.DeleteOptions ++ ++ // Apply all FilterOptions, if they implement that interface ++ for _, opt := range opts { ++ o.FilterOptions.ApplyOption(opt) ++ } ++ return o ++} +diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go +new file mode 100644 +index 0000000..1108c1d +--- /dev/null ++++ b/pkg/storage/client/transactional/client.go +@@ -0,0 +1,330 @@ ++package transactional ++ ++import ( ++ "context" ++ "crypto/rand" ++ "encoding/hex" ++ "fmt" ++ "strings" ++ "sync" ++ "sync/atomic" ++ ++ "github.com/sirupsen/logrus" ++ "github.com/weaveworks/libgitops/pkg/storage/backend" ++ "github.com/weaveworks/libgitops/pkg/storage/client" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ utilerrs "k8s.io/apimachinery/pkg/util/errors" ++) ++ ++var _ Client = &Generic{} ++ ++func NewGeneric(c client.Client, manager BranchManager, merger BranchMerger) (Client, error) { ++ if c == nil { ++ return nil, fmt.Errorf("%w: c is required", core.ErrInvalidParameter) ++ } ++ if manager == nil { ++ return nil, fmt.Errorf("%w: manager is required", core.ErrInvalidParameter) ++ } ++ return &Generic{ ++ c: c, ++ txs: make(map[string]*txLock), ++ 
txsMu: &sync.Mutex{}, ++ manager: manager, ++ merger: merger, ++ }, nil ++} ++ ++type Generic struct { ++ c client.Client ++ ++ txs map[string]*txLock ++ txsMu *sync.Mutex ++ ++ // +optional ++ merger BranchMerger ++ // +required ++ manager BranchManager ++} ++ ++type txLock struct { ++ mu *sync.RWMutex ++ mode TxMode ++ // active == 1 means "transaction active, mu is locked for writing" ++ // active == 0 means "transaction has stopped, mu has been unlocked" ++ active uint32 ++} ++ ++func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { ++ return c.lockForReading(ctx, func() error { ++ return c.c.Get(ctx, key, obj) ++ }) ++} ++ ++func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...core.ListOption) error { ++ return c.lockForReading(ctx, func() error { ++ return c.c.List(ctx, list, opts...) ++ }) ++} ++ ++func (c *Generic) lockForReading(ctx context.Context, operation func() error) error { ++ ref := core.GetVersionRef(ctx) ++ if !ref.IsWritable() { ++ // Never block reads for read-only VersionRefs. We know nobody can change ++ // them during the read operation, so they should be race condition-free. ++ return operation() ++ } ++ // If the VersionRef is writable; treat it as a branch and lock it to avoid ++ // race conditions. ++ return c.lockAndReadBranch(ref.String(), operation) ++} ++ ++func (c *Generic) lockAndReadBranch(branch string, callback func() error) error { ++ // Use c.txsMu to guard reads and writes to the c.txs map ++ c.txsMu.Lock() ++ // Check if information about a transaction on this branch exists. ++ txState, ok := c.txs[branch] ++ if !ok { ++ // grow the txs map by one ++ c.txs[branch] = &txLock{ ++ mu: &sync.RWMutex{}, ++ } ++ txState = c.txs[branch] ++ } ++ c.txsMu.Unlock() ++ ++ // In the atomic mode, we lock the txLock during the read, ++ // so no new transactions can be started while the read ++ // operation goes on. 
In non-atomic modes, reads aren't locked, ++ // instead it is assumed that downstream implementations just ++ // read the latest commit on the given branch. ++ if txState.mode == TxModeAtomic { ++ txState.mu.RLock() ++ } ++ err := callback() ++ if txState.mode == TxModeAtomic { ++ txState.mu.RUnlock() ++ } ++ return err ++} ++ ++func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txFunc) { ++ // Aquire the tx-specific lock ++ c.txsMu.Lock() ++ txState, ok := c.txs[info.Head] ++ if !ok { ++ // grow the txs map by one ++ c.txs[info.Head] = &txLock{ ++ mu: &sync.RWMutex{}, ++ } ++ txState = c.txs[info.Head] ++ } ++ txState.mode = info.Options.Mode ++ c.txsMu.Unlock() ++ ++ // Wait for all reads to complete (in the case of the atomic more), ++ // and then lock for writing. For non-atomic mode this uses the mutex ++ // as it is modifying txState, and two transactions must not run at ++ // the same time for the same branch. ++ // ++ // Always lock mu when a transaction is running on this branch, ++ // regardless of mode. If atomic mode is enabled, this also waits ++ // on any reads happening at this moment. For all modes, this ensures ++ // transactions happen in order. ++ txState.mu.Lock() ++ txState.active = 1 // set tx state to "active" ++ ++ // Create a child context with a timeout ++ dlCtx, cleanupTimeout := context.WithTimeout(ctx, info.Options.Timeout) ++ ++ // This function cleans up the transaction, and unlocks the tx muted ++ cleanupFunc := func() error { ++ // Cleanup after the transaction ++ if err := c.cleanupAfterTx(ctx, &info); err != nil { ++ return fmt.Errorf("Failed to cleanup branch %s after tx: %v", info.Head, err) ++ } ++ // Unlock the mutex so new transactions can take place on this branch ++ txState.mu.Unlock() ++ return nil ++ } ++ ++ // Start waiting for the cancellation of the deadline context. 
++ go func() { ++ // Wait for the context to either timeout or be cancelled ++ <-dlCtx.Done() ++ // This guard makes sure the cleanup function runs exactly ++ // once, regardless of transaction end cause. ++ if atomic.CompareAndSwapUint32(&txState.active, 1, 0) { ++ if err := cleanupFunc(); err != nil { ++ logrus.Errorf("Failed to cleanup after tx timeout: %v", err) ++ } ++ } ++ }() ++ ++ abortFunc := func() error { ++ // The transaction ended; the caller is either Abort() or ++ // at the end of a successful transaction. The cause of ++ // Abort() happening can also be a context cancellation. ++ // If the parent context was cancelled or timed out; this ++ // function and the above function race to set active => 0 ++ // Regardless, due to the atomic nature of the operation, ++ // cleanupFunc() will only be run twice. ++ if atomic.CompareAndSwapUint32(&txState.active, 1, 0) { ++ // We can now stop the timeout timer ++ cleanupTimeout() ++ // Clean up the transaction ++ return cleanupFunc() ++ } ++ return nil ++ } ++ ++ return dlCtx, abortFunc ++} ++ ++func (c *Generic) cleanupAfterTx(ctx context.Context, info *TxInfo) error { ++ // Always both clean the branch, and run post-tx tasks ++ return utilerrs.NewAggregate([]error{ ++ c.manager.ResetToCleanBranch(ctx, info.Base), ++ // TODO: should this be in its own goroutine to switch back to main ++ // ASAP? ++ c.manager.TransactionHookChain().PostTransactionHook(ctx, *info), ++ }) ++} ++ ++func (c *Generic) BackendReader() backend.Reader { ++ return c.c.BackendReader() ++} ++ ++func (c *Generic) BranchMerger() BranchMerger { ++ return c.merger ++} ++ ++func (c *Generic) BranchManager() BranchManager { ++ return c.manager ++} ++ ++func (c *Generic) Transaction(ctx context.Context, opts ...TxOption) Tx { ++ tx, err := c.transaction(ctx, opts...) 
++ if err != nil { ++ panic(err) ++ } ++ return tx ++} ++ ++func (c *Generic) BranchTransaction(ctx context.Context, headBranch string, opts ...TxOption) BranchTx { ++ tx, err := c.branchTransaction(ctx, headBranch, opts...) ++ if err != nil { ++ panic(err) ++ } ++ return tx ++} ++ ++func (c *Generic) validateCtx(ctx context.Context) (core.VersionRef, error) { ++ // Check so versionref is writable ++ ref := core.GetVersionRef(ctx) ++ if !ref.IsWritable() { ++ return nil, fmt.Errorf("must not give a writable VersionRef to (Branch)Transaction()") ++ } ++ // Just return its ++ return ref, nil ++} ++ ++func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) { ++ // Validate the versionref from the context ++ ref, err := c.validateCtx(ctx) ++ if err != nil { ++ return nil, err ++ } ++ ++ // Parse options ++ o := defaultTxOptions().ApplyOptions(opts) ++ ++ branch := ref.String() ++ info := TxInfo{ ++ Base: branch, ++ Head: branch, ++ Options: *o, ++ } ++ // Initialize the transaction ++ ctxWithDeadline, cleanupFunc := c.initTx(ctx, info) ++ ++ // Run pre-tx checks ++ err = c.manager.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info) ++ ++ return &txImpl{ ++ &txCommon{ ++ err: err, ++ c: c.c, ++ manager: c.manager, ++ ctx: ctxWithDeadline, ++ info: info, ++ cleanupFunc: cleanupFunc, ++ }, ++ }, nil ++} ++ ++func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts ...TxOption) (BranchTx, error) { ++ // Validate the versionref from the context ++ ref, err := c.validateCtx(ctx) ++ if err != nil { ++ return nil, err ++ } ++ baseBranch := ref.String() ++ ++ // Append random bytes to the end of the head branch if it ends with a dash ++ if strings.HasSuffix(headBranch, "-") { ++ suffix, err := randomSHA(4) ++ if err != nil { ++ return nil, err ++ } ++ headBranch += suffix ++ } ++ ++ // Validate that the base and head branches are distinct ++ if baseBranch == headBranch { ++ return nil, fmt.Errorf("head and target 
branches must not be the same") ++ } ++ ++ logrus.Debugf("Base branch: %q. Head branch: %q.", baseBranch, headBranch) ++ ++ // Parse options ++ o := defaultTxOptions().ApplyOptions(opts) ++ ++ info := TxInfo{ ++ Base: baseBranch, ++ Head: headBranch, ++ Options: *o, ++ } ++ ++ // Register the head branch with the context ++ ctxWithHeadBranch := core.WithVersionRef(ctx, core.NewBranchRef(headBranch)) ++ // Initialize the transaction ++ ctxWithDeadline, cleanupFunc := c.initTx(ctxWithHeadBranch, info) ++ ++ // Run pre-tx checks and create the new branch ++ err = utilerrs.NewAggregate([]error{ ++ c.manager.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info), ++ c.manager.CreateBranch(ctxWithDeadline, headBranch), ++ }) ++ ++ return &txBranchImpl{ ++ txCommon: &txCommon{ ++ err: err, ++ c: c.c, ++ manager: c.manager, ++ ctx: ctxWithDeadline, ++ info: info, ++ cleanupFunc: cleanupFunc, ++ }, ++ merger: c.merger, ++ }, nil ++} ++ ++// randomSHA returns a hex-encoded string from {byteLen} random bytes. ++func randomSHA(byteLen int) (string, error) { ++ b := make([]byte, byteLen) ++ _, err := rand.Read(b) ++ if err != nil { ++ return "", err ++ } ++ return hex.EncodeToString(b), nil ++} +diff --git a/pkg/storage/client/transactional/commit.go b/pkg/storage/client/transactional/commit.go +new file mode 100644 +index 0000000..eeb5e9f +--- /dev/null ++++ b/pkg/storage/client/transactional/commit.go +@@ -0,0 +1,126 @@ ++package transactional ++ ++import ( ++ "fmt" ++ ++ "github.com/fluxcd/go-git-providers/validation" ++) ++ ++// Commit describes a result of a transaction. ++type Commit interface { ++ // GetAuthor describes the author of this commit. ++ // +required ++ GetAuthor() CommitAuthor ++ // GetMessage describes the change in this commit. ++ // +required ++ GetMessage() CommitMessage ++ // Validate validates that all required fields are set, and given data is valid. 
++ Validate() error ++} ++ ++type CommitAuthor interface { ++ // GetName describes the author's name (e.g. as per git config) ++ // +required ++ GetName() string ++ // GetEmail describes the author's email (e.g. as per git config). ++ // It is optional generally, but might be required by some specific ++ // implementations. ++ // +optional ++ GetEmail() string ++ // The String() method must return a (ideally both human- and machine- ++ // readable) concatenated string including the name and email (if ++ // applicable) of the author. ++ fmt.Stringer ++} ++ ++type CommitMessage interface { ++ // GetTitle describes the change concisely, so it can be used e.g. as ++ // a commit message or PR title. Certain implementations might enforce ++ // character limits on this string. ++ // +required ++ GetTitle() string ++ // GetDescription contains optional extra, more detailed information ++ // about the change. ++ // +optional ++ GetDescription() string ++ // The String() method must return a (ideally both human- and machine- ++ // readable) concatenated string including the title and description ++ // (if applicable) of the author. ++ fmt.Stringer ++} ++ ++// GenericCommitResult implements Commit. ++var _ Commit = GenericCommit{} ++ ++// GenericCommit implements Commit. ++type GenericCommit struct { ++ // GetAuthor describes the author of this commit. ++ // +required ++ Author CommitAuthor ++ // GetMessage describes the change in this commit. ++ // +required ++ Message CommitMessage ++} ++ ++func (r GenericCommit) GetAuthor() CommitAuthor { return r.Author } ++func (r GenericCommit) GetMessage() CommitMessage { return r.Message } ++ ++func (r GenericCommit) Validate() error { ++ v := validation.New("GenericCommit") ++ if len(r.Author.GetName()) == 0 { ++ v.Required("Author.GetName") ++ } ++ if len(r.Message.GetTitle()) == 0 { ++ v.Required("Message.GetTitle") ++ } ++ return v.Error() ++} ++ ++// GenericCommitAuthor implements CommitAuthor. 
++var _ CommitAuthor = GenericCommitAuthor{} ++ ++// GenericCommit implements Commit. ++type GenericCommitAuthor struct { ++ // Name describes the author's name (as per git config) ++ // +required ++ Name string ++ // Email describes the author's email (as per git config) ++ // +optional ++ Email string ++} ++ ++func (r GenericCommitAuthor) GetName() string { return r.Name } ++func (r GenericCommitAuthor) GetEmail() string { return r.Email } ++ ++func (r GenericCommitAuthor) String() string { ++ if len(r.Email) != 0 { ++ return fmt.Sprintf("%s <%s>", r.Name, r.Email) ++ } ++ return r.Name ++} ++ ++// GenericCommitMessage implements CommitMessage. ++var _ CommitMessage = GenericCommitMessage{} ++ ++// GenericCommitMessage implements CommitMessage. ++type GenericCommitMessage struct { ++ // Title describes the change concisely, so it can be used e.g. as ++ // a commit message or PR title. Certain implementations might enforce ++ // character limits on this string. ++ // +required ++ Title string ++ // Description contains optional extra, more detailed information ++ // about the change. 
++ // +optional ++ Description string ++} ++ ++func (r GenericCommitMessage) GetTitle() string { return r.Title } ++func (r GenericCommitMessage) GetDescription() string { return r.Description } ++ ++func (r GenericCommitMessage) String() string { ++ if len(r.Description) != 0 { ++ return fmt.Sprintf("%s\n\n%s", r.Title, r.Description) ++ } ++ return r.Title ++} +diff --git a/pkg/storage/client/transactional/distributed/client.go b/pkg/storage/client/transactional/distributed/client.go +new file mode 100644 +index 0000000..665c6fd +--- /dev/null ++++ b/pkg/storage/client/transactional/distributed/client.go +@@ -0,0 +1,313 @@ ++package distributed ++ ++import ( ++ "context" ++ "fmt" ++ "sync" ++ "time" ++ ++ "github.com/sirupsen/logrus" ++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ "k8s.io/apimachinery/pkg/util/wait" ++) ++ ++// NewClient creates a new distributed Client using the given underlying transactional Client, ++// remote, and options that configure how the Client should respond to network partitions. 
++func NewClient(c transactional.Client, remote Remote, opts ...ClientOption) (*Generic, error) { ++ if c == nil { ++ return nil, fmt.Errorf("%w: c is mandatory", core.ErrInvalidParameter) ++ } ++ if remote == nil { ++ return nil, fmt.Errorf("%w: remote is mandatory", core.ErrInvalidParameter) ++ } ++ ++ o := defaultOptions().ApplyOptions(opts) ++ ++ g := &Generic{ ++ Client: c, ++ remote: remote, ++ opts: *o, ++ branchLocks: make(map[string]*branchLock), ++ branchLocksMu: &sync.Mutex{}, ++ } ++ ++ // Register ourselves to hook into the branch manager's operations ++ c.BranchManager().CommitHookChain().Register(g) ++ c.BranchManager().TransactionHookChain().Register(g) ++ ++ return g, nil ++} ++ ++type Generic struct { ++ transactional.Client ++ remote Remote ++ opts ClientOptions ++ // branchLocks maps a given branch to a given lock the state of the branch ++ branchLocks map[string]*branchLock ++ // branchLocksMu guards branchLocks ++ branchLocksMu *sync.Mutex ++} ++ ++type branchLock struct { ++ // mu should be write-locked whenever the branch is actively running any ++ // function from the remote ++ mu *sync.RWMutex ++ // lastPull is guarded by mu, before reading, one should RLock mu ++ lastPull time.Time ++} ++ ++func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj core.Object) error { ++ return c.readWhenPossible(ctx, func() error { ++ return c.Client.Get(ctx, key, obj) ++ }) ++} ++ ++func (c *Generic) List(ctx context.Context, list core.ObjectList, opts ...core.ListOption) error { ++ return c.readWhenPossible(ctx, func() error { ++ return c.Client.List(ctx, list, opts...) 
++ }) ++} ++ ++func (c *Generic) readWhenPossible(ctx context.Context, operation func() error) error { ++ ref := core.GetVersionRef(ctx) ++ // If the ref is not writable, we don't have to worry about race conditions ++ if !ref.IsWritable() { ++ return operation() ++ } ++ branch := ref.String() ++ ++ // Check if we need to do a pull before ++ if c.needsResync(branch, c.opts.CacheValidDuration) { ++ // Try to pull the remote branch. If it fails, use returnErr to figure out if ++ // this (depending on the configured PACELC mode) is a critical error, or if we ++ // should continue with the read ++ if err := c.pull(ctx, branch); err != nil { ++ if criticalErr := c.returnErr(err); criticalErr != nil { ++ return criticalErr ++ } ++ } ++ } ++ // Do the read operation ++ return operation() ++} ++ ++func (c *Generic) getBranchLockInfo(branch string) *branchLock { ++ c.branchLocksMu.Lock() ++ defer c.branchLocksMu.Unlock() ++ ++ // Check if there exists a lock for that branch ++ info, ok := c.branchLocks[branch] ++ if ok { ++ return info ++ } ++ // Write to the branchLocks map ++ c.branchLocks[branch] = &branchLock{ ++ mu: &sync.RWMutex{}, ++ } ++ return c.branchLocks[branch] ++} ++ ++func (c *Generic) needsResync(branch string, d time.Duration) bool { ++ lck := c.getBranchLockInfo(branch) ++ // Lock while reading the last resync time ++ lck.mu.RLock() ++ defer lck.mu.RUnlock() ++ // Resync if there has been no sync so far, or if the last resync was too long ago ++ return lck.lastPull.IsZero() || time.Since(lck.lastPull) > d ++} ++ ++// StartResyncLoop starts a resync loop for the given branches for ++// the given interval. ++// ++// resyncCacheInterval specifies the interval for which resyncs ++// (remote Pulls) should be run in the background. The duration must ++// be positive, and non-zero. ++// ++// resyncBranches specifies what branches to resync. The default is ++// []string{""}, i.e. only the "default" branch. 
++// ++// ctx should be used to cancel the loop, if needed. ++// ++// While it is technically possible to start many of these resync ++// loops, it is not recommended. Start it once, for all the branches ++// you need. The branches will be pulled synchronously in order. The ++// resync interval is non-sliding, which means that the interval ++// includes the time of the operations. ++func (c *Generic) StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches ...string) { ++ // Only start this loop if resyncCacheInterval > 0 ++ if resyncCacheInterval <= 0 { ++ logrus.Warn("No need to start the resync loop; resyncCacheInterval <= 0") ++ return ++ } ++ // If unset, only sync the default branch. ++ if resyncBranches == nil { ++ resyncBranches = []string{""} ++ } ++ ++ // Start the resync goroutine ++ go c.resyncLoop(ctx, resyncCacheInterval, resyncBranches) ++} ++ ++func (c *Generic) resyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches []string) { ++ logrus.Debug("Starting the resync loop...") ++ ++ wait.NonSlidingUntilWithContext(ctx, func(_ context.Context) { ++ ++ for _, branch := range resyncBranches { ++ logrus.Tracef("resyncLoop: Will perform pull operation on branch: %q", branch) ++ // Perform a fetch, pull & checkout of the new revision ++ if err := c.pull(ctx, branch); err != nil { ++ logrus.Errorf("resyncLoop: pull failed with error: %v", err) ++ return ++ } ++ } ++ }, resyncCacheInterval) ++ logrus.Info("Exiting the resync loop...") ++} ++ ++func (c *Generic) pull(ctx context.Context, branch string) error { ++ // Need to get the branch-specific lock variable ++ lck := c.getBranchLockInfo(branch) ++ // Write-lock while this operation is in progress ++ lck.mu.Lock() ++ defer lck.mu.Unlock() ++ ++ // Create a new context that times out after the given duration ++ pullCtx, cancel := context.WithTimeout(ctx, c.opts.PullTimeout) ++ defer cancel() ++ ++ // Make a ctx for the given branch ++ ctxForBranch := 
core.WithVersionRef(pullCtx, core.NewBranchRef(branch)) ++ if err := c.remote.Pull(ctxForBranch); err != nil { ++ return err ++ } ++ ++ // Register the timestamp into the lock ++ lck.lastPull = time.Now() ++ ++ // All good ++ return nil ++} ++ ++func (c *Generic) PreTransactionHook(ctx context.Context, info transactional.TxInfo) error { ++ // We count on ctx having the VersionRef registered for the head branch ++ ++ // Lock the branch for writing, if supported by the remote ++ // If the lock fails, we DO NOT try to pull, but just exit (either with err or a nil error, ++ // depending on the configured PACELC mode) ++ // TODO: Can we rely on the timeout being exact enough here? ++ // TODO: How to do this before the branch even exists...? ++ if err := c.lock(ctx, info.Options.Timeout); err != nil { ++ return c.returnErr(err) ++ } ++ ++ // Always Pull the _base_ branch before a transaction, to be up-to-date ++ // before creating the new head branch ++ if err := c.pull(ctx, info.Base); err != nil { ++ return c.returnErr(err) ++ } ++ ++ // All good ++ return nil ++} ++ ++func (c *Generic) PreCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error { ++ return nil // nothing to do here ++} ++ ++func (c *Generic) PostCommitHook(ctx context.Context, _ transactional.Commit, _ transactional.TxInfo) error { ++ // Push the branch in the ctx ++ if err := c.push(ctx); err != nil { ++ return c.returnErr(err) ++ } ++ return nil ++} ++ ++func (c *Generic) PostTransactionHook(ctx context.Context, info transactional.TxInfo) error { ++ // Unlock the head branch, if supported ++ if err := c.unlock(ctx); err != nil { ++ return c.returnErr(err) ++ } ++ ++ return nil ++} ++ ++func (c *Generic) Remote() Remote { ++ return c.remote ++} ++ ++// note: this must ONLY be called from such functions where it is guaranteed that the ++// ctx contains a branch versionref. 
++func (c *Generic) branchFromCtx(ctx context.Context) string { ++ return core.GetVersionRef(ctx).String() ++} ++ ++func (c *Generic) returnErr(err error) error { ++ // If RemoteErrorStream isn't defined, just pass the error through ++ if c.opts.RemoteErrorStream == nil { ++ return err ++ } ++ // Non-blocking send to the channel, and no return error ++ go func() { ++ c.opts.RemoteErrorStream <- err ++ }() ++ return nil ++} ++ ++func (c *Generic) lock(ctx context.Context, d time.Duration) error { ++ lr, ok := c.remote.(LockableRemote) ++ if !ok { ++ return nil ++ } ++ ++ // Need to get the branch-specific lock variable ++ lck := c.getBranchLockInfo(c.branchFromCtx(ctx)) ++ // Write-lock while this operation is in progress ++ lck.mu.Lock() ++ defer lck.mu.Unlock() ++ ++ // Enforce a timeout ++ lockCtx, cancel := context.WithTimeout(ctx, c.opts.LockTimeout) ++ defer cancel() ++ ++ return lr.Lock(lockCtx, d) ++} ++ ++func (c *Generic) unlock(ctx context.Context) error { ++ lr, ok := c.remote.(LockableRemote) ++ if !ok { ++ return nil ++ } ++ ++ // Need to get the branch-specific lock variable ++ lck := c.getBranchLockInfo(c.branchFromCtx(ctx)) ++ // Write-lock while this operation is in progress ++ lck.mu.Lock() ++ defer lck.mu.Unlock() ++ ++ // Enforce a timeout ++ unlockCtx, cancel := context.WithTimeout(ctx, c.opts.LockTimeout) ++ defer cancel() ++ ++ return lr.Unlock(unlockCtx) ++} ++ ++func (c *Generic) push(ctx context.Context) error { ++ // Need to get the branch-specific lock variable ++ lck := c.getBranchLockInfo(c.branchFromCtx(ctx)) ++ // Write-lock while this operation is in progress ++ lck.mu.Lock() ++ defer lck.mu.Unlock() ++ ++ // Create a new context that times out after the given duration ++ pushCtx, cancel := context.WithTimeout(ctx, c.opts.PushTimeout) ++ defer cancel() ++ ++ // Push the head branch using the remote ++ // If the Push fails, don't execute any other later statements ++ if err := c.remote.Push(pushCtx); err != nil { ++ return err ++ } 
++ return nil ++} +diff --git a/pkg/storage/client/transactional/distributed/git/git.go b/pkg/storage/client/transactional/distributed/git/git.go +new file mode 100644 +index 0000000..53cf157 +--- /dev/null ++++ b/pkg/storage/client/transactional/distributed/git/git.go +@@ -0,0 +1,368 @@ ++package git ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ "io/ioutil" ++ "os" ++ "sync" ++ "time" ++ ++ "github.com/fluxcd/go-git-providers/gitprovider" ++ git "github.com/go-git/go-git/v5" ++ "github.com/go-git/go-git/v5/plumbing" ++ "github.com/go-git/go-git/v5/plumbing/object" ++ log "github.com/sirupsen/logrus" ++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional" ++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed" ++) ++ ++var ( ++ // ErrNotStarted happens if you try to operate on the LocalClone before you have started ++ // it with StartCheckoutLoop. ++ ErrNotStarted = errors.New("the LocalClone hasn't been started (and hence, cloned) yet") ++ // ErrCannotWriteToReadOnly happens if you try to do a write operation for a non-authenticated Git repo. ++ ErrCannotWriteToReadOnly = errors.New("the LocalClone is read-only, cannot write") ++) ++ ++const ( ++ defaultBranch = "master" ++) ++ ++// LocalCloneOptions provides options for the LocalClone. ++// TODO: Refactor this into the controller-runtime Options factory pattern. ++type LocalCloneOptions struct { ++ Branch string // default "master" ++ ++ // Authentication method. If unspecified, this clone is read-only. ++ AuthMethod AuthMethod ++} ++ ++func (o *LocalCloneOptions) Default() { ++ if o.Branch == "" { ++ o.Branch = defaultBranch ++ } ++} ++ ++// LocalClone is an implementation of both a Remote, and a BranchManager, for Git. ++var _ transactional.BranchManager = &LocalClone{} ++var _ distributed.Remote = &LocalClone{} ++ ++// Create a new Remote and BranchManager implementation using Git. 
The repo is cloned immediately ++// in the constructor, you can use ctx to enforce a timeout for the clone. ++func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts LocalCloneOptions) (*LocalClone, error) { ++ log.Info("Initializing the Git repo...") ++ ++ // Default the options ++ opts.Default() ++ ++ // Create a temporary directory for the clone ++ tmpDir, err := ioutil.TempDir("", "libgitops") ++ if err != nil { ++ return nil, err ++ } ++ log.Debugf("Created temporary directory for the git clone at %q", tmpDir) ++ ++ d := &LocalClone{ ++ repoRef: repoRef, ++ opts: opts, ++ cloneDir: tmpDir, ++ lock: &sync.Mutex{}, ++ commitHooks: &transactional.MultiCommitHook{}, ++ txHooks: &transactional.MultiTransactionHook{}, ++ } ++ ++ log.Trace("URL endpoint parsed and authentication method chosen") ++ ++ if d.canWrite() { ++ log.Infof("Running in read-write mode, will commit back current status to the repo") ++ } else { ++ log.Infof("Running in read-only mode, won't write status back to the repo") ++ } ++ ++ // Clone the repo ++ if err := d.clone(ctx); err != nil { ++ return nil, err ++ } ++ ++ return d, nil ++} ++ ++// LocalClone is an implementation of both a Remote, and a BranchManager, for Git. ++type LocalClone struct { ++ // user-specified options ++ repoRef gitprovider.RepositoryRef ++ opts LocalCloneOptions ++ ++ // the temporary directory used for the clone ++ cloneDir string ++ ++ // go-git objects. wt is the worktree of the repo, persistent during the lifetime of repo. 
++ repo *git.Repository ++ wt *git.Worktree ++ ++ // the lock for git operations (so no ops are done simultaneously) ++ lock *sync.Mutex ++ ++ commitHooks transactional.CommitHookChain ++ txHooks transactional.TransactionHookChain ++} ++ ++func (d *LocalClone) CommitHookChain() transactional.CommitHookChain { ++ return d.commitHooks ++} ++ ++func (d *LocalClone) TransactionHookChain() transactional.TransactionHookChain { ++ return d.txHooks ++} ++ ++func (d *LocalClone) Dir() string { ++ return d.cloneDir ++} ++ ++func (d *LocalClone) MainBranch() string { ++ return d.opts.Branch ++} ++ ++func (d *LocalClone) RepositoryRef() gitprovider.RepositoryRef { ++ return d.repoRef ++} ++ ++func (d *LocalClone) canWrite() bool { ++ return d.opts.AuthMethod != nil ++} ++ ++// verifyRead makes sure it's ok to start a read-something-from-git process ++func (d *LocalClone) verifyRead() error { ++ // Safeguard against not starting yet ++ if d.wt == nil { ++ return fmt.Errorf("cannot pull: %w", ErrNotStarted) ++ } ++ return nil ++} ++ ++// verifyWrite makes sure it's ok to start a write-something-to-git process ++func (d *LocalClone) verifyWrite() error { ++ // We need all read privileges first ++ if err := d.verifyRead(); err != nil { ++ return err ++ } ++ // Make sure we don't write to a possibly read-only repo ++ if !d.canWrite() { ++ return ErrCannotWriteToReadOnly ++ } ++ return nil ++} ++ ++func (d *LocalClone) clone(ctx context.Context) error { ++ // Lock the mutex now that we're starting, and unlock it when exiting ++ d.lock.Lock() ++ defer d.lock.Unlock() ++ ++ cloneURL := d.repoRef.GetCloneURL(d.opts.AuthMethod.TransportType()) ++ ++ log.Infof("Starting to clone the repository %s", d.repoRef) ++ // Do a clone operation to the temporary directory ++ var err error ++ d.repo, err = git.PlainCloneContext(ctx, d.Dir(), false, &git.CloneOptions{ ++ URL: cloneURL, ++ Auth: d.opts.AuthMethod, ++ ReferenceName: plumbing.NewBranchReferenceName(d.opts.Branch), ++ SingleBranch: 
true, ++ NoCheckout: false, ++ //Depth: 1, // ref: https://github.com/src-d/go-git/issues/1143 ++ RecurseSubmodules: 0, ++ Progress: nil, ++ Tags: git.NoTags, ++ }) ++ // Handle errors ++ if errors.Is(err, context.DeadlineExceeded) { ++ return fmt.Errorf("git clone operation timed out: %w", err) ++ } else if errors.Is(err, context.Canceled) { ++ return fmt.Errorf("git clone was cancelled: %w", err) ++ } else if err != nil { ++ return fmt.Errorf("git clone error: %v", err) ++ } ++ ++ // Populate the worktree pointer ++ d.wt, err = d.repo.Worktree() ++ if err != nil { ++ return fmt.Errorf("git get worktree error: %v", err) ++ } ++ ++ // Get the latest HEAD commit and report it to the user ++ ref, err := d.repo.Head() ++ if err != nil { ++ return err ++ } ++ ++ log.Infof("Repo cloned; HEAD commit is %s", ref.Hash()) ++ return nil ++} ++ ++func (d *LocalClone) Pull(ctx context.Context) error { ++ // Lock the mutex now that we're starting, and unlock it when exiting ++ d.lock.Lock() ++ defer d.lock.Unlock() ++ ++ // TODO: This should support doing Fetch() only maybe ++ // TODO: Remove the requirement to actually be on the branch ++ // that is being pulled. ++ ++ // Make sure it's okay to read ++ if err := d.verifyRead(); err != nil { ++ return err ++ } ++ ++ // Perform the git pull operation. 
The context carries a timeout ++ log.Trace("Starting pull operation") ++ err := d.wt.PullContext(ctx, &git.PullOptions{ ++ Auth: d.opts.AuthMethod, ++ SingleBranch: true, ++ }) ++ ++ // Handle errors ++ if errors.Is(err, git.NoErrAlreadyUpToDate) { ++ // all good, nothing more to do ++ log.Trace("Pull already up-to-date") ++ return nil ++ } else if errors.Is(err, context.DeadlineExceeded) { ++ return fmt.Errorf("git pull operation timed out: %w", err) ++ } else if errors.Is(err, context.Canceled) { ++ return fmt.Errorf("git pull was cancelled: %w", err) ++ } else if err != nil { ++ return fmt.Errorf("git pull error: %v", err) ++ } ++ ++ log.Trace("Pulled successfully") ++ ++ // Get current HEAD ++ ref, err := d.repo.Head() ++ if err != nil { ++ return err ++ } ++ ++ log.Infof("New commit observed %s", ref.Hash()) ++ return nil ++} ++ ++func (d *LocalClone) Push(ctx context.Context) error { ++ // TODO: Push a specific branch only. Use opts.RefSpecs? ++ ++ // Perform the git push operation. The context carries a timeout ++ log.Debug("Starting push operation") ++ err := d.repo.PushContext(ctx, &git.PushOptions{ ++ Auth: d.opts.AuthMethod, ++ }) ++ ++ // Handle errors ++ if errors.Is(err, git.NoErrAlreadyUpToDate) { ++ // TODO: Is it good if there's nothing more to do; or a failure if there's nothing to push? 
++ log.Trace("Push already up-to-date") ++ return nil ++ } else if errors.Is(err, context.DeadlineExceeded) { ++ return fmt.Errorf("git push operation timed out: %w", err) ++ } else if errors.Is(err, context.Canceled) { ++ return fmt.Errorf("git push was cancelled: %w", err) ++ } else if err != nil { ++ return fmt.Errorf("git push error: %v", err) ++ } ++ ++ log.Trace("Pushed successfully") ++ ++ return nil ++} ++ ++func (d *LocalClone) CreateBranch(_ context.Context, branch string) error { ++ // Lock the mutex now that we're starting, and unlock it when exiting ++ d.lock.Lock() ++ defer d.lock.Unlock() ++ ++ // TODO: Should the caller do a force-reset using ResetToCleanBranch before creating the branch? ++ ++ // Make sure it's okay to write ++ if err := d.verifyWrite(); err != nil { ++ return err ++ } ++ ++ return d.wt.Checkout(&git.CheckoutOptions{ ++ Branch: plumbing.NewBranchReferenceName(branch), ++ Create: true, ++ }) ++} ++ ++func (d *LocalClone) ResetToCleanBranch(_ context.Context, branch string) error { ++ // Lock the mutex now that we're starting, and unlock it when exiting ++ d.lock.Lock() ++ defer d.lock.Unlock() ++ ++ // Make sure it's okay to write ++ if err := d.verifyWrite(); err != nil { ++ return err ++ } ++ ++ // Best-effort clean ++ _ = d.wt.Clean(&git.CleanOptions{ ++ Dir: true, ++ }) ++ // Force-checkout the main branch ++ return d.wt.Checkout(&git.CheckoutOptions{ ++ Branch: plumbing.NewBranchReferenceName(branch), ++ Force: true, ++ }) ++ // TODO: Do a pull here too? ++} ++ ++// Commit creates a commit of all changes in the current worktree with the given parameters. ++// It also automatically pushes the branch after the commit. ++// ErrNotStarted is returned if the repo hasn't been cloned yet. ++// ErrCannotWriteToReadOnly is returned if opts.AuthMethod wasn't provided. 
++func (d *LocalClone) Commit(ctx context.Context, commit transactional.Commit) error { ++ // Lock the mutex now that we're starting, and unlock it when exiting ++ d.lock.Lock() ++ defer d.lock.Unlock() ++ ++ // Make sure it's okay to write ++ if err := d.verifyWrite(); err != nil { ++ return err ++ } ++ ++ s, err := d.wt.Status() ++ if err != nil { ++ return fmt.Errorf("git status failed: %v", err) ++ } ++ if s.IsClean() { ++ log.Debugf("No changed files in git repo, nothing to commit...") ++ // TODO: Should this be an error instead? ++ return nil ++ } ++ ++ // Do a commit ++ log.Debug("Committing all local changes") ++ hash, err := d.wt.Commit(commit.GetMessage().String(), &git.CommitOptions{ ++ All: true, ++ Author: &object.Signature{ ++ Name: commit.GetAuthor().GetName(), ++ Email: commit.GetAuthor().GetEmail(), ++ When: time.Now(), ++ }, ++ }) ++ if err != nil { ++ return fmt.Errorf("git commit error: %v", err) ++ } ++ ++ // Notify upstream that we now have a new commit, and allow writing again ++ log.Infof("A new commit has been created: %q", hash) ++ return nil ++} ++ ++// Cleanup cancels running goroutines and operations, and removes the temporary clone directory ++func (d *LocalClone) Cleanup() error { ++ // Remove the temporary directory ++ if err := os.RemoveAll(d.Dir()); err != nil { ++ log.Errorf("Failed to clean up temp git directory: %v", err) ++ return err ++ } ++ return nil ++} +diff --git a/pkg/storage/client/transactional/distributed/git/github/github.go b/pkg/storage/client/transactional/distributed/git/github/github.go +new file mode 100644 +index 0000000..23a2012 +--- /dev/null ++++ b/pkg/storage/client/transactional/distributed/git/github/github.go +@@ -0,0 +1,182 @@ ++package github ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ ++ "github.com/fluxcd/go-git-providers/github" ++ "github.com/fluxcd/go-git-providers/gitprovider" ++ "github.com/fluxcd/go-git-providers/validation" ++ gogithub "github.com/google/go-github/v32/github" ++ 
"github.com/sirupsen/logrus" ++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional" ++) ++ ++// PullRequest can be returned from a TransactionFunc instead of a CommitResult, if ++// a PullRequest is desired to be created by the PullRequestProvider. ++type PullRequest interface { ++ // PullRequestResult is a superset of CommitResult ++ transactional.Commit ++ ++ // GetLabels specifies what labels should be applied on the PR. ++ // +optional ++ GetLabels() []string ++ // GetAssignees specifies what user login names should be assigned to this PR. ++ // Note: Only users with "pull" access or more can be assigned. ++ // +optional ++ GetAssignees() []string ++ // GetMilestone specifies what milestone this should be attached to. ++ // +optional ++ GetMilestone() string ++} ++ ++// GenericPullRequest implements PullRequest. ++var _ PullRequest = GenericPullRequest{} ++ ++// GenericPullRequest implements PullRequest. ++type GenericPullRequest struct { ++ // GenericPullRequest is a superset of a Commit. ++ transactional.Commit ++ ++ // Labels specifies what labels should be applied on the PR. ++ // +optional ++ Labels []string ++ // Assignees specifies what user login names should be assigned to this PR. ++ // Note: Only users with "pull" access or more can be assigned. ++ // +optional ++ Assignees []string ++ // Milestone specifies what milestone this should be attached to. 
++ // +optional ++ Milestone string ++} ++ ++func (r GenericPullRequest) GetLabels() []string { return r.Labels } ++func (r GenericPullRequest) GetAssignees() []string { return r.Assignees } ++func (r GenericPullRequest) GetMilestone() string { return r.Milestone } ++ ++func (r GenericPullRequest) Validate() error { ++ v := validation.New("GenericPullRequest") ++ // Just validate the "inner" object ++ v.Append(r.Commit.Validate(), r.Commit, "Commit") ++ return v.Error() ++} ++ ++// TODO: This package should really only depend on go-git-providers' abstraction interface ++ ++var ErrProviderNotSupported = errors.New("only the Github go-git-providers provider is supported at the moment") ++ ++// NewGitHubPRCommitHandler returns a new transactional.CommitHandler from a gitprovider.Client. ++func NewGitHubPRCommitHandler(c gitprovider.Client, repoRef gitprovider.RepositoryRef) (transactional.CommitHook, error) { ++ // Make sure a Github client was passed ++ if c.ProviderID() != github.ProviderID { ++ return nil, ErrProviderNotSupported ++ } ++ return &prCreator{c, repoRef}, nil ++} ++ ++type prCreator struct { ++ c gitprovider.Client ++ repoRef gitprovider.RepositoryRef ++} ++ ++func (c *prCreator) PreCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error { ++ return nil ++} ++ ++func (c *prCreator) PostCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error { ++ // First, validate the input ++ if err := commit.Validate(); err != nil { ++ return fmt.Errorf("given transactional.Commit wasn't valid") ++ } ++ ++ prCommit, ok := commit.(PullRequest) ++ if !ok { ++ return nil ++ } ++ ++ // Use the "raw" go-github client to do this ++ ghClient := c.c.Raw().(*gogithub.Client) ++ ++ // Helper variables ++ owner := c.repoRef.GetIdentity() ++ repo := c.repoRef.GetRepository() ++ var body *string ++ if commit.GetMessage().GetDescription() != "" { ++ body = 
gogithub.String(commit.GetMessage().GetDescription()) ++ } ++ ++ // Create the Pull Request ++ prPayload := &gogithub.NewPullRequest{ ++ Head: gogithub.String(info.Head), ++ Base: gogithub.String(info.Base), ++ Title: gogithub.String(commit.GetMessage().GetTitle()), ++ Body: body, ++ } ++ logrus.Infof("GitHub PR payload: %+v", prPayload) ++ pr, _, err := ghClient.PullRequests.Create(ctx, owner, repo, prPayload) ++ if err != nil { ++ return err ++ } ++ ++ // If spec.GetMilestone() is set, fetch the ID of the milestone ++ // Only set milestoneID to non-nil if specified ++ var milestoneID *int ++ if len(prCommit.GetMilestone()) != 0 { ++ milestoneID, err = getMilestoneID(ctx, ghClient, owner, repo, prCommit.GetMilestone()) ++ if err != nil { ++ return err ++ } ++ } ++ ++ // Only set assignees to non-nil if specified ++ var assignees *[]string ++ if a := prCommit.GetAssignees(); len(a) != 0 { ++ assignees = &a ++ } ++ ++ // Only set labels to non-nil if specified ++ var labels *[]string ++ if l := prCommit.GetLabels(); len(l) != 0 { ++ labels = &l ++ } ++ ++ // Only PATCH the PR if any of the fields were set ++ if milestoneID != nil || assignees != nil || labels != nil { ++ _, _, err := ghClient.Issues.Edit(ctx, owner, repo, pr.GetNumber(), &gogithub.IssueRequest{ ++ Milestone: milestoneID, ++ Assignees: assignees, ++ Labels: labels, ++ }) ++ if err != nil { ++ return err ++ } ++ } ++ ++ return nil ++} ++ ++func getMilestoneID(ctx context.Context, c *gogithub.Client, owner, repo, milestoneName string) (*int, error) { ++ // List all milestones in the repo ++ // TODO: This could/should use pagination ++ milestones, _, err := c.Issues.ListMilestones(ctx, owner, repo, &gogithub.MilestoneListOptions{ ++ State: "all", ++ }) ++ if err != nil { ++ return nil, err ++ } ++ // Loop through all milestones, search for one with the right name ++ for _, milestone := range milestones { ++ // Only consider a milestone with the right name ++ if milestone.GetTitle() != milestoneName { ++ 
continue ++ } ++ // Validate nil to avoid panics ++ if milestone.Number == nil { ++ return nil, fmt.Errorf("didn't expect milestone Number to be nil: %v", milestone) ++ } ++ // Return the Milestone number ++ return milestone.Number, nil ++ } ++ return nil, fmt.Errorf("couldn't find milestone with name: %s", milestoneName) ++} +diff --git a/pkg/gitdir/transport.go b/pkg/storage/client/transactional/distributed/git/transport.go +similarity index 97% +rename from pkg/gitdir/transport.go +rename to pkg/storage/client/transactional/distributed/git/transport.go +index df2c325..3017853 100644 +--- a/pkg/gitdir/transport.go ++++ b/pkg/storage/client/transactional/distributed/git/transport.go +@@ -1,10 +1,10 @@ +-package gitdir ++package git + + import ( + "errors" + + "github.com/fluxcd/go-git-providers/gitprovider" +- "github.com/fluxcd/toolkit/pkg/ssh/knownhosts" ++ "github.com/fluxcd/pkg/ssh/knownhosts" + "github.com/go-git/go-git/v5/plumbing/transport" + "github.com/go-git/go-git/v5/plumbing/transport/http" + "github.com/go-git/go-git/v5/plumbing/transport/ssh" +diff --git a/pkg/storage/client/transactional/distributed/interfaces.go b/pkg/storage/client/transactional/distributed/interfaces.go +new file mode 100644 +index 0000000..8110599 +--- /dev/null ++++ b/pkg/storage/client/transactional/distributed/interfaces.go +@@ -0,0 +1,75 @@ ++package distributed ++ ++import ( ++ "context" ++ "time" ++ ++ "github.com/weaveworks/libgitops/pkg/storage/client/transactional" ++) ++ ++// Client is a client that can sync state with a remote in a transactional way. ++type Client interface { ++ // The distributed Client extends the transactional Client ++ transactional.Client ++ // This Client is itself both a CommitHook and TransactionHook; these should ++ // be automatically registered with the transactional.Client's BranchManager ++ // in this Client's constructor. 
++ transactional.CommitHook ++ transactional.TransactionHook ++ ++ // StartResyncLoop starts a resync loop for the given branches for ++ // the given interval. ++ // ++ // resyncCacheInterval specifies the interval for which resyncs ++ // (remote Pulls) should be run in the background. The duration must ++ // be positive, and non-zero. ++ // ++ // resyncBranches specifies what branches to resync. The default is ++ // []string{""}, i.e. only the "default" branch. ++ // ++ // ctx should be used to cancel the loop, if needed. ++ // ++ // While it is technically possible to start many of these resync ++ // loops, it is not recommended. Start it once, for all the branches ++ // you need. The branches will be pulled synchronously in order. The ++ // resync interval is non-sliding, which means that the interval ++ // includes the time of the operations. ++ StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches ...string) ++ ++ // Remote exposes the underlying remote used ++ Remote() Remote ++} ++ ++type Remote interface { ++ // Push pushes the attached branch (of the ctx) to the remote. ++ // Push must block as long as the operation is in progress, but also ++ // respect the timeout set on ctx and return instantly after it expires. ++ // ++ // It is guaranteed that Pull() and Push() are never called racily at ++ // the same time for the same branch, BUT Pull() and Push() might be called ++ // at the same time in any order for distinct branches. If the underlying ++ // Remote transport only supports one "writer transport" to it at the same time, ++ // the Remote must coordinate pulls and pushes with a mutex internally. ++ Push(ctx context.Context) error ++ ++ // Pull pulls the attached branch (of the ctx) from the remote. ++ // Pull must block as long as the operation is in progress, but also ++ // respect the timeout set on ctx and return instantly after it expires. 
++ // ++ // It is guaranteed that Pull() and Push() are never called racily at ++ // the same time for the same branch, BUT Pull() and Push() might be called ++ // at the same time in any order for distinct branches. If the underlying ++ // Remote transport only supports one "writer transport" to it at the same time, ++ // the Remote must coordinate pulls and pushes with a mutex internally. ++ Pull(ctx context.Context) error ++} ++ ++// LockableRemote describes a remote that supports locking a remote branch for writing. ++type LockableRemote interface { ++ Remote ++ ++ // Lock locks the branch attached to the context for writing, for the given duration. ++ Lock(ctx context.Context, d time.Duration) error ++ // Unlock reverses the write lock created by Lock() ++ Unlock(ctx context.Context) error ++} +diff --git a/pkg/storage/client/transactional/distributed/options.go b/pkg/storage/client/transactional/distributed/options.go +new file mode 100644 +index 0000000..4640ce9 +--- /dev/null ++++ b/pkg/storage/client/transactional/distributed/options.go +@@ -0,0 +1,97 @@ ++package distributed ++ ++import "time" ++ ++// ClientOption is an interface for applying options to ClientOptions. ++type ClientOption interface { ++ ApplyToClient(*ClientOptions) ++} ++ ++// ClientOptions specify options on how the distributed client should ++// act according to the PACELC theorem. ++// ++// The following configurations correspond to the PACELC levels: ++// ++// PC/EC: CacheValidDuration == 0 && RemoteErrorStream == nil: ++// This makes every read first do a remote Pull(), and fails ++// critically if the Pull operation fails. Transactions fail ++// if Push() fails. ++// ++// PC/EL: CacheValidDuration > 0 && RemoteErrorStream == nil: ++// This makes a read do a remote Pull only if the delta between ++// the last Pull and time.Now() exceeds CacheValidDuration. 
++// StartResyncLoop(resyncCacheInterval) can be used to ++// periodically Pull in the background, so that the latency ++// of reads are minimal. Transactions and reads fail if ++// Push() or Pull() fail. ++// ++// PA/EL: RemoteErrorStream != nil: ++// How often reads invoke Pull() is given by CacheValidDuration ++// and StartResyncLoop(resyncCacheInterval) as per above. ++// However, when a Pull() or Push() is invoked from a read or ++// transaction, and a network partition happens, such errors are ++// non-critical for the operation to succeed, as Availability is ++// favored and cached objects are returned. ++type ClientOptions struct { ++ // CacheValidDuration is the period of time the cache is still ++ // valid since its last resync (remote Pull). If set to 0; all ++ // reads will invoke a resync right before reading; as the cache ++ // is never valid. This option set to 0 favors Consistency over ++ // Availability. ++ // ++ // CacheValidDuration == 0 and RemoteErrorStream != nil must not ++ // be set at the same time; as they contradict. ++ // ++ // Default: 1m ++ CacheValidDuration time.Duration ++ // RemoteErrorStream specifies a stream in which to readirect ++ // errors from the remote, instead of returning them to the caller. ++ // This is useful for allowing "offline operation", and favoring ++ // Availability over Consistency when a Partition happens (i.e. ++ // the network is unreachable). In normal operation, remote Push/Pull ++ // errors would propagate to the caller and "fail" the Transaction, ++ // however, if that is not desired, those errors can be propagated ++ // here, and the caller will succeed with the transaction. 
++ // Default: nil (optional) ++ RemoteErrorStream chan error ++ ++ // Default: 30s for all ++ LockTimeout time.Duration ++ PullTimeout time.Duration ++ PushTimeout time.Duration ++} ++ ++func (o *ClientOptions) ApplyToClient(target *ClientOptions) { ++ if o.CacheValidDuration != 0 { ++ target.CacheValidDuration = o.CacheValidDuration ++ } ++ if o.RemoteErrorStream != nil { ++ target.RemoteErrorStream = o.RemoteErrorStream ++ } ++ if o.LockTimeout != 0 { ++ target.LockTimeout = o.LockTimeout ++ } ++ if o.PullTimeout != 0 { ++ target.PullTimeout = o.PullTimeout ++ } ++ if o.PushTimeout != 0 { ++ target.PushTimeout = o.PushTimeout ++ } ++} ++ ++func (o *ClientOptions) ApplyOptions(opts []ClientOption) *ClientOptions { ++ for _, opt := range opts { ++ opt.ApplyToClient(o) ++ } ++ return o ++} ++ ++func defaultOptions() *ClientOptions { ++ return &ClientOptions{ ++ CacheValidDuration: 1 * time.Minute, ++ RemoteErrorStream: nil, ++ LockTimeout: 30 * time.Second, ++ PullTimeout: 30 * time.Second, ++ PushTimeout: 30 * time.Second, ++ } ++} +diff --git a/pkg/storage/client/transactional/handlers.go b/pkg/storage/client/transactional/handlers.go +new file mode 100644 +index 0000000..aa438e3 +--- /dev/null ++++ b/pkg/storage/client/transactional/handlers.go +@@ -0,0 +1,103 @@ ++package transactional ++ ++import "context" ++ ++type TxInfo struct { ++ Base string ++ Head string ++ Options TxOptions ++} ++ ++type CommitHookChain interface { ++ // The chain also itself implements CommitHook ++ CommitHook ++ // Register registers a new CommitHook to the chain ++ Register(CommitHook) ++} ++ ++type CommitHook interface { ++ PreCommitHook(ctx context.Context, commit Commit, info TxInfo) error ++ PostCommitHook(ctx context.Context, commit Commit, info TxInfo) error ++} ++ ++var _ CommitHookChain = &MultiCommitHook{} ++var _ CommitHook = &MultiCommitHook{} ++ ++type MultiCommitHook struct { ++ CommitHooks []CommitHook ++} ++ ++func (m *MultiCommitHook) Register(h CommitHook) { ++ 
m.CommitHooks = append(m.CommitHooks, h) ++} ++ ++func (m *MultiCommitHook) PreCommitHook(ctx context.Context, commit Commit, info TxInfo) error { ++ for _, ch := range m.CommitHooks { ++ if ch == nil { ++ continue ++ } ++ if err := ch.PreCommitHook(ctx, commit, info); err != nil { ++ return err ++ } ++ } ++ return nil ++} ++ ++func (m *MultiCommitHook) PostCommitHook(ctx context.Context, commit Commit, info TxInfo) error { ++ for _, ch := range m.CommitHooks { ++ if ch == nil { ++ continue ++ } ++ if err := ch.PostCommitHook(ctx, commit, info); err != nil { ++ return err ++ } ++ } ++ return nil ++} ++ ++type TransactionHookChain interface { ++ // The chain also itself implements TransactionHook ++ TransactionHook ++ // Register registers a new CommitHook to the chain ++ Register(TransactionHook) ++} ++ ++type TransactionHook interface { ++ PreTransactionHook(ctx context.Context, info TxInfo) error ++ PostTransactionHook(ctx context.Context, info TxInfo) error ++} ++ ++var _ TransactionHookChain = &MultiTransactionHook{} ++var _ TransactionHook = &MultiTransactionHook{} ++ ++type MultiTransactionHook struct { ++ TransactionHooks []TransactionHook ++} ++ ++func (m *MultiTransactionHook) Register(h TransactionHook) { ++ m.TransactionHooks = append(m.TransactionHooks, h) ++} ++ ++func (m *MultiTransactionHook) PreTransactionHook(ctx context.Context, info TxInfo) error { ++ for _, th := range m.TransactionHooks { ++ if th == nil { ++ continue ++ } ++ if err := th.PreTransactionHook(ctx, info); err != nil { ++ return err ++ } ++ } ++ return nil ++} ++ ++func (m *MultiTransactionHook) PostTransactionHook(ctx context.Context, info TxInfo) error { ++ for _, th := range m.TransactionHooks { ++ if th == nil { ++ continue ++ } ++ if err := th.PostTransactionHook(ctx, info); err != nil { ++ return err ++ } ++ } ++ return nil ++} +diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go +new file mode 100644 +index 
// Client is a transactional client: a read-only client extended with branch
// management and transaction entry points.
type Client interface {
	client.Reader

	// BranchManager returns the manager used to create, reset and commit branches.
	BranchManager() BranchManager
	// BranchMerger returns the merger used to merge a head branch into a base branch.
	BranchMerger() BranchMerger

	// Transaction starts a transaction against the current branch.
	Transaction(ctx context.Context, opts ...TxOption) Tx
	// BranchTransaction starts a transaction against the named branch.
	BranchTransaction(ctx context.Context, branchName string, opts ...TxOption) BranchTx
}

// BranchManager manages branches and commits for the transactional client.
type BranchManager interface {
	// CreateBranch creates (and checks out) a new branch.
	CreateBranch(ctx context.Context, branch string) error
	// ResetToCleanBranch resets the working state to a clean checkout of branch.
	ResetToCleanBranch(ctx context.Context, branch string) error
	// Commit records all pending changes as the given commit.
	Commit(ctx context.Context, commit Commit) error

	// CommitHookChain must be non-nil, but can be a no-op
	CommitHookChain() CommitHookChain
	// TransactionHookChain must be non-nil, but can be a no-op
	TransactionHookChain() TransactionHookChain
}

// BranchMerger merges a head branch into a base branch with the given commit.
type BranchMerger interface {
	MergeBranches(ctx context.Context, base, head string, commit Commit) error
}

// CustomTxFunc is a user-supplied operation executed inside a transaction.
type CustomTxFunc func(ctx context.Context) error
Get(key core.ObjectKey, obj core.Object) BranchTx ++ List(list core.ObjectList, opts ...core.ListOption) BranchTx ++ ++ Create(obj core.Object, opts ...core.CreateOption) BranchTx ++ Update(obj core.Object, opts ...core.UpdateOption) BranchTx ++ Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx ++ Delete(obj core.Object, opts ...core.DeleteOption) BranchTx ++ DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) BranchTx ++ ++ UpdateStatus(obj core.Object, opts ...core.UpdateOption) BranchTx ++ PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx ++} ++ ++type BranchTxResult interface { ++ Error() error ++ MergeWithBase(Commit) error ++} +diff --git a/pkg/storage/client/transactional/options.go b/pkg/storage/client/transactional/options.go +new file mode 100644 +index 0000000..6b3679c +--- /dev/null ++++ b/pkg/storage/client/transactional/options.go +@@ -0,0 +1,66 @@ ++package transactional ++ ++import "time" ++ ++type TxOption interface { ++ ApplyToTx(*TxOptions) ++} ++ ++var _ TxOption = &TxOptions{} ++ ++func defaultTxOptions() *TxOptions { ++ return &TxOptions{ ++ Timeout: 1 * time.Minute, ++ Mode: TxModeAtomic, ++ } ++} ++ ++type TxOptions struct { ++ Timeout time.Duration ++ Mode TxMode ++} ++ ++func (o *TxOptions) ApplyToTx(target *TxOptions) { ++ if o.Timeout != 0 { ++ target.Timeout = o.Timeout ++ } ++ if len(o.Mode) != 0 { ++ target.Mode = o.Mode ++ } ++} ++ ++func (o *TxOptions) ApplyOptions(opts []TxOption) *TxOptions { ++ for _, opt := range opts { ++ opt.ApplyToTx(o) ++ } ++ return o ++} ++ ++var _ TxOption = TxMode("") ++ ++type TxMode string ++ ++const ( ++ // TxModeAtomic makes the transaction fully atomic, i.e. so ++ // that any read happening against the target branch during the ++ // lifetime of the transaction will be blocked until the completition ++ // of the transaction. 
++ TxModeAtomic TxMode = "Atomic" ++ // TxModeAllowReading will allow reads targeting the given ++ // branch a transaction is executing against; but before the ++ // transaction has completed all reads will strictly return ++ // the data available prior to the transaction taking place. ++ TxModeAllowReading TxMode = "AllowReading" ++) ++ ++func (m TxMode) ApplyToTx(target *TxOptions) { ++ target.Mode = m ++} ++ ++var _ TxOption = TxTimeout(0) ++ ++type TxTimeout time.Duration ++ ++func (t TxTimeout) ApplyToTx(target *TxOptions) { ++ target.Timeout = time.Duration(t) ++} +diff --git a/pkg/storage/client/transactional/tx.go b/pkg/storage/client/transactional/tx.go +new file mode 100644 +index 0000000..30c6b6c +--- /dev/null ++++ b/pkg/storage/client/transactional/tx.go +@@ -0,0 +1,24 @@ ++package transactional ++ ++type txImpl struct { ++ *txCommon ++} ++ ++func (tx *txImpl) Commit(c Commit) error { ++ // Run the operations, and try to create the commit ++ if err := tx.tryApplyAndCommitOperations(c); err != nil { ++ // If we failed with the transaction, abort directly ++ return tx.Abort(err) ++ } ++ ++ // We successfully completed all the tasks needed ++ // Now, cleanup and unlock the branch ++ return tx.cleanupFunc() ++} ++ ++func (tx *txImpl) Custom(op CustomTxFunc) Tx { ++ tx.ops = append(tx.ops, func() error { ++ return op(tx.ctx) ++ }) ++ return tx ++} +diff --git a/pkg/storage/client/transactional/tx_branch.go b/pkg/storage/client/transactional/tx_branch.go +new file mode 100644 +index 0000000..c7011a3 +--- /dev/null ++++ b/pkg/storage/client/transactional/tx_branch.go +@@ -0,0 +1,71 @@ ++package transactional ++ ++import ( ++ "context" ++ "fmt" ++) ++ ++type txBranchImpl struct { ++ *txCommon ++ ++ merger BranchMerger ++} ++ ++func (tx *txBranchImpl) CreateTx(c Commit) BranchTxResult { ++ // Run the operations, and try to create the commit ++ if err := tx.tryApplyAndCommitOperations(c); err != nil { ++ // If we failed with the transaction, abort directly, and 
// Custom queues an arbitrary operation to run inside the branch transaction.
func (tx *txBranchImpl) Custom(op CustomTxFunc) BranchTx {
	tx.ops = append(tx.ops, func() error {
		return op(tx.ctx)
	})
	return tx
}

// newErrTxResult wraps err in a BranchTxResult that only carries the error.
func newErrTxResult(err error) *txResultImpl {
	return &txResultImpl{err: err}
}

// txResultImpl is the default BranchTxResult implementation; it records the
// transaction outcome and the data needed to merge head into base afterwards.
type txResultImpl struct {
	err        error
	ctx        context.Context
	merger     BranchMerger
	baseBranch string
	headBranch string
}

// Error returns the error recorded during the transaction, if any.
func (r *txResultImpl) Error() error {
	return r.err
}

// MergeWithBase merges the transaction's head branch into its base branch
// using the configured BranchMerger, with c as the merge commit.
func (r *txResultImpl) MergeWithBase(c Commit) error {
	// If there is an internal error, return it
	if r.err != nil {
		return r.err
	}
	// Make sure we have a merger
	if r.merger == nil {
		return fmt.Errorf("TxResult: The BranchMerger is nil")
	}
	// Try to merge the branch
	return r.merger.MergeBranches(r.ctx, r.baseBranch, r.headBranch, c)
}

// txFunc is a deferred transaction operation executed during commit.
type txFunc func() error

// txCommon holds the state shared by the Tx and BranchTx implementations.
type txCommon struct {
	err         error       // first error recorded while building the transaction
	c           client.Client
	manager     BranchManager
	ctx         context.Context
	ops         []txFunc    // queued operations, run in order at commit time
	info        TxInfo
	cleanupFunc txFunc      // unlocks/cleans up the branch when the tx ends
}

// Client returns the client to use for operations inside this transaction.
func (tx *txCommon) Client() client.Client {
	return tx.c
}
cleanup function and return an aggregate of the two possible errors ++ return utilerrs.NewAggregate([]error{ ++ err, ++ tx.cleanupFunc(), ++ }) ++} ++ ++func (tx *txCommon) handlePreCommit(c Commit) txFunc { ++ return func() error { ++ return tx.manager.CommitHookChain().PreCommitHook(tx.ctx, c, tx.info) ++ } ++} ++ ++func (tx *txCommon) commit(c Commit) txFunc { ++ return func() error { ++ return tx.manager.Commit(tx.ctx, c) ++ } ++} ++ ++func (tx *txCommon) handlePostCommit(c Commit) txFunc { ++ return func() error { ++ return tx.manager.CommitHookChain().PostCommitHook(tx.ctx, c, tx.info) ++ } ++} ++ ++func (tx *txCommon) tryApplyAndCommitOperations(c Commit) error { ++ // If an error occurred already before, just return it directly ++ if tx.err != nil { ++ return tx.err ++ } ++ ++ // First, all registered client operations are run ++ // Then Pre-commit, commit, and post-commit functions are run ++ // If at any stage the context is cancelled, an error is returned ++ // immediately, and no more functions in the chain are run. The ++ // same goes for errors from any of the functions, the chain is ++ // immediately interrupted on errors. ++ return execTransactionsCtx(tx.ctx, append( ++ tx.ops, ++ tx.handlePreCommit(c), ++ tx.commit(c), ++ tx.handlePostCommit(c), ++ )) ++} +diff --git a/pkg/storage/client/transactional/tx_ops.go b/pkg/storage/client/transactional/tx_ops.go +new file mode 100644 +index 0000000..e0a6c37 +--- /dev/null ++++ b/pkg/storage/client/transactional/tx_ops.go +@@ -0,0 +1,105 @@ ++package transactional ++ ++import ( ++ "context" ++ ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++) ++ ++func (tx *txImpl) Get(key core.ObjectKey, obj core.Object) Tx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.Get(ctx, key, obj) ++ }) ++} ++func (tx *txImpl) List(list core.ObjectList, opts ...core.ListOption) Tx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.List(ctx, list, opts...) 
++ }) ++} ++ ++func (tx *txImpl) Create(obj core.Object, opts ...core.CreateOption) Tx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.Create(ctx, obj, opts...) ++ }) ++} ++func (tx *txImpl) Update(obj core.Object, opts ...core.UpdateOption) Tx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.Update(ctx, obj, opts...) ++ }) ++} ++func (tx *txImpl) Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.Patch(ctx, obj, patch, opts...) ++ }) ++} ++func (tx *txImpl) Delete(obj core.Object, opts ...core.DeleteOption) Tx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.Delete(ctx, obj, opts...) ++ }) ++} ++func (tx *txImpl) DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) Tx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.DeleteAllOf(ctx, obj, opts...) ++ }) ++} ++ ++func (tx *txImpl) UpdateStatus(obj core.Object, opts ...core.UpdateOption) Tx { ++ return tx.Custom(func(ctx context.Context) error { ++ return nil // TODO tx.c.Status().Update(ctx, obj, opts...) ++ }) ++} ++func (tx *txImpl) PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) Tx { ++ return tx.Custom(func(ctx context.Context) error { ++ return nil // TODO tx.c.Status().Patch(ctx, obj, patch, opts...) ++ }) ++} ++ ++// TODO ++ ++func (tx *txBranchImpl) Get(key core.ObjectKey, obj core.Object) BranchTx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.Get(ctx, key, obj) ++ }) ++} ++func (tx *txBranchImpl) List(list core.ObjectList, opts ...core.ListOption) BranchTx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.List(ctx, list, opts...) ++ }) ++} ++ ++func (tx *txBranchImpl) Create(obj core.Object, opts ...core.CreateOption) BranchTx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.Create(ctx, obj, opts...) 
++ }) ++} ++func (tx *txBranchImpl) Update(obj core.Object, opts ...core.UpdateOption) BranchTx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.Update(ctx, obj, opts...) ++ }) ++} ++func (tx *txBranchImpl) Patch(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.Patch(ctx, obj, patch, opts...) ++ }) ++} ++func (tx *txBranchImpl) Delete(obj core.Object, opts ...core.DeleteOption) BranchTx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.Delete(ctx, obj, opts...) ++ }) ++} ++func (tx *txBranchImpl) DeleteAllOf(obj core.Object, opts ...core.DeleteAllOfOption) BranchTx { ++ return tx.Custom(func(ctx context.Context) error { ++ return tx.c.DeleteAllOf(ctx, obj, opts...) ++ }) ++} ++ ++func (tx *txBranchImpl) UpdateStatus(obj core.Object, opts ...core.UpdateOption) BranchTx { ++ return tx.Custom(func(ctx context.Context) error { ++ return nil // TODO tx.c.Status().Update(ctx, obj, opts...) ++ }) ++} ++func (tx *txBranchImpl) PatchStatus(obj core.Object, patch core.Patch, opts ...core.PatchOption) BranchTx { ++ return tx.Custom(func(ctx context.Context) error { ++ return nil // TODO tx.c.Status().Patch(ctx, obj, patch, opts...) ++ }) ++} +diff --git a/pkg/storage/client/transactional/utils.go b/pkg/storage/client/transactional/utils.go +new file mode 100644 +index 0000000..4812266 +--- /dev/null ++++ b/pkg/storage/client/transactional/utils.go +@@ -0,0 +1,21 @@ ++package transactional ++ ++import "context" ++ ++// execTransactionsCtx executes the functions in order. Before each ++// function in the chain is run; the context is checked for errors ++// (e.g. if it has been cancelled or timed out). If a context error ++// is returned, or if a function in the chain returns an error, this ++// function returns directly, without executing the rest of the ++// functions in the chain. 
++func execTransactionsCtx(ctx context.Context, funcs []txFunc) error { ++ for _, fn := range funcs { ++ if err := ctx.Err(); err != nil { ++ return err ++ } ++ if err := fn(); err != nil { ++ return err ++ } ++ } ++ return nil ++} +diff --git a/pkg/storage/client/utils.go b/pkg/storage/client/utils.go +new file mode 100644 +index 0000000..da86908 +--- /dev/null ++++ b/pkg/storage/client/utils.go +@@ -0,0 +1,23 @@ ++package client ++ ++import ( ++ "errors" ++ "fmt" ++ ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ "k8s.io/apimachinery/pkg/runtime" ++) ++ ++var ErrNoMetadata = errors.New("it is required to embed ObjectMeta into the serialized API type") ++ ++func NewObjectForGVK(gvk core.GroupVersionKind, scheme *runtime.Scheme) (core.Object, error) { ++ kobj, err := scheme.New(gvk) ++ if err != nil { ++ return nil, err ++ } ++ obj, ok := kobj.(core.Object) ++ if !ok { ++ return nil, fmt.Errorf("%w: %s", ErrNoMetadata, gvk) ++ } ++ return obj, nil ++} +diff --git a/pkg/storage/core/errors.go b/pkg/storage/core/errors.go +new file mode 100644 +index 0000000..f65895a +--- /dev/null ++++ b/pkg/storage/core/errors.go +@@ -0,0 +1,50 @@ ++package core ++ ++import ( ++ goerrors "errors" ++ ++ "k8s.io/apimachinery/pkg/api/errors" ++ "k8s.io/apimachinery/pkg/runtime/schema" ++ "k8s.io/apimachinery/pkg/util/validation/field" ++) ++ ++var ( ++ // ErrNotImplemented can be returned for implementers that do not ++ // implement a specific part of an interface. ++ ErrNotImplemented = goerrors.New("not implemented") ++ // ErrInvalidParameter specifies that a given parameter ++ // (as a public struct field or function argument) was ++ // not valid according to the specification. ++ ErrInvalidParameter = goerrors.New("invalid parameter") ++) ++ ++// StatusError is an error that supports also conversion ++// to a metav1.Status struct for more detailed information. 
++type StatusError interface { ++ error ++ errors.APIStatus ++} ++ ++func NewErrNotFound(id UnversionedObjectID) StatusError { ++ return errors.NewNotFound(schema.GroupResource{ ++ Group: id.GroupKind().Group, ++ Resource: id.GroupKind().Kind, ++ }, id.ObjectKey().Name) ++} ++ ++func NewErrAlreadyExists(id UnversionedObjectID) StatusError { ++ return errors.NewAlreadyExists(schema.GroupResource{ ++ Group: id.GroupKind().Group, ++ Resource: id.GroupKind().Kind, ++ }, id.ObjectKey().Name) ++} ++ ++func NewErrInvalid(id UnversionedObjectID, errs field.ErrorList) StatusError { ++ return errors.NewInvalid(id.GroupKind(), id.ObjectKey().Name, errs) ++} ++ ++var ( ++ IsErrNotFound = errors.IsNotFound ++ IsErrAlreadyExists = errors.IsAlreadyExists ++ IsErrInvalid = errors.IsInvalid ++) +diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go +new file mode 100644 +index 0000000..b25cec3 +--- /dev/null ++++ b/pkg/storage/core/interfaces.go +@@ -0,0 +1,86 @@ ++package core ++ ++import ( ++ "context" ++ ++ "k8s.io/apimachinery/pkg/runtime/schema" ++ "k8s.io/apimachinery/pkg/types" ++ "sigs.k8s.io/controller-runtime/pkg/client" ++) ++ ++// Note: package core must not depend on any other parts of the libgitops repo, possibly the serializer package as an exception. ++// Anything under k8s.io/apimachinery goes though, and important external imports ++// like github.com/spf13/afero is also ok. The pretty large sigs.k8s.io/controller-runtime ++// import is a bit sub-optimal, though. 
++ ++// GroupVersionKind aliases ++type GroupKind = schema.GroupKind ++type GroupVersion = schema.GroupVersion ++type GroupVersionKind = schema.GroupVersionKind ++ ++// Client-related Object aliases ++type Object = client.Object ++type ObjectKey = types.NamespacedName ++type ObjectList = client.ObjectList ++type Patch = client.Patch ++ ++// Client-related Option aliases ++type ListOption = client.ListOption ++type CreateOption = client.CreateOption ++type UpdateOption = client.UpdateOption ++type PatchOption = client.PatchOption ++type DeleteOption = client.DeleteOption ++type DeleteAllOfOption = client.DeleteAllOfOption ++ ++// Helper functions from client. ++var ObjectKeyFromObject = client.ObjectKeyFromObject ++ ++// Namespacer is an interface that lets the caller know if a GroupKind is namespaced ++// or not. There are two ready-made implementations: ++// 1. RESTMapperToNamespacer ++// 2. NewStaticNamespacer ++type Namespacer interface { ++ // IsNamespaced returns true if the GroupKind is a namespaced type ++ IsNamespaced(gk schema.GroupKind) (bool, error) ++} ++ ++// TODO: Investigate if the ObjectRecognizer should return unversioned ++// or versioned ObjectID's ++type ObjectRecognizer interface { ++ ResolveObjectID(ctx context.Context, fileName string, content []byte) (ObjectID, error) ++} ++ ++// UnversionedObjectID represents an ID for an Object whose version is not known. ++// However, the Group, Kind, Name and optionally, Namespace is known and should ++// uniquely identify the Object at a specific moment in time. ++type UnversionedObjectID interface { ++ GroupKind() GroupKind ++ ObjectKey() ObjectKey ++ ++ WithVersion(version string) ObjectID ++} ++ ++// ObjectID is a superset of UnversionedObjectID, that also specifies an exact version. ++type ObjectID interface { ++ UnversionedObjectID ++ ++ GroupVersionKind() GroupVersionKind ++} ++ ++// VersionRef is an interface that describes a reference to a specific version ++// of Objects in a Storage or Client. 
++type VersionRef interface { ++ // String returns the commit or branch name. ++ String() string ++ // IsWritable determines if the VersionRef points to such a state where it ++ // is possible to write on top of it, i.e. as in the case of a Git branch. ++ // ++ // A specific Git commit, however, isn't considered writable, as it points ++ // to a specific point in time that can't just be rewritten, (assuming this ++ // library only is additive, which it is). ++ IsWritable() bool ++ // IsZeroValue determines if this VersionRef is the "zero value", which means ++ // that the caller should figure out how to handle that the user did not ++ // give specific opinions of what version of the Object to get. ++ IsZeroValue() bool ++} +diff --git a/pkg/storage/core/namespaces.go b/pkg/storage/core/namespaces.go +new file mode 100644 +index 0000000..d0929f5 +--- /dev/null ++++ b/pkg/storage/core/namespaces.go +@@ -0,0 +1,37 @@ ++package core ++ ++import ( ++ "k8s.io/apimachinery/pkg/runtime/schema" ++) ++ ++// StaticNamespacer implements Namespacer ++var _ Namespacer = StaticNamespacer{} ++ ++// StaticNamespacer has a default policy, which is that objects are in general namespaced ++// (NamespacedIsDefaultPolicy == true), or that they are in general root-scoped ++// (NamespacedIsDefaultPolicy == false). ++// ++// To the default policy, Exceptions can be added, so that for that GroupKind, the default ++// policy is reversed. 
++type StaticNamespacer struct { ++ NamespacedIsDefaultPolicy bool ++ Exceptions []schema.GroupKind ++} ++ ++func (n StaticNamespacer) IsNamespaced(gk schema.GroupKind) (bool, error) { ++ if n.NamespacedIsDefaultPolicy { ++ // namespace by default, the gks list is a list of root-scoped entities ++ return !n.gkIsException(gk), nil ++ } ++ // root by default, the gks in the list are namespaced ++ return n.gkIsException(gk), nil ++} ++ ++func (n StaticNamespacer) gkIsException(target schema.GroupKind) bool { ++ for _, gk := range n.Exceptions { ++ if gk == target { ++ return true ++ } ++ } ++ return false ++} +diff --git a/pkg/storage/core/objectid.go b/pkg/storage/core/objectid.go +new file mode 100644 +index 0000000..8dc747b +--- /dev/null ++++ b/pkg/storage/core/objectid.go +@@ -0,0 +1,29 @@ ++package core ++ ++import "k8s.io/apimachinery/pkg/runtime/schema" ++ ++// NewUnversionedObjectID creates a new UnversionedObjectID from the given GroupKind and ObjectKey. ++func NewUnversionedObjectID(gk GroupKind, key ObjectKey) UnversionedObjectID { ++ return unversionedObjectID{gk, key} ++} ++ ++type unversionedObjectID struct { ++ gk GroupKind ++ key ObjectKey ++} ++ ++func (o unversionedObjectID) GroupKind() GroupKind { return o.gk } ++func (o unversionedObjectID) ObjectKey() ObjectKey { return o.key } ++func (o unversionedObjectID) WithVersion(version string) ObjectID { return objectID{o, version} } ++ ++// NewObjectID creates a new ObjectID from the given GroupVersionKind and ObjectKey. 
++func NewObjectID(gvk GroupVersionKind, key ObjectKey) ObjectID { ++ return objectID{unversionedObjectID{gvk.GroupKind(), key}, gvk.Version} ++} ++ ++type objectID struct { ++ unversionedObjectID ++ version string ++} ++ ++func (o objectID) GroupVersionKind() schema.GroupVersionKind { return o.gk.WithVersion(o.version) } +diff --git a/pkg/storage/core/recognizer.go b/pkg/storage/core/recognizer.go +new file mode 100644 +index 0000000..fac0fe1 +--- /dev/null ++++ b/pkg/storage/core/recognizer.go +@@ -0,0 +1,58 @@ ++package core ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ ++ "github.com/weaveworks/libgitops/pkg/serializer" ++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ++) ++ ++// SerializerObjectRecognizer implements ObjectRecognizer. ++var _ ObjectRecognizer = &SerializerObjectRecognizer{} ++ ++// SerializerObjectRecognizer is a simple implementation of ObjectRecognizer, that ++// decodes the given byte content with the assumption that it is YAML (which covers ++// both YAML and JSON formats) into a *metav1.PartialObjectMetadata, which allows ++// extracting the ObjectID from any Kubernetes API Machinery-compatible Object. ++// ++// This operation works even though *metav1.PartialObjectMetadata is not registered ++// with the underlying Scheme in any way. ++type SerializerObjectRecognizer struct { ++ // Serializer is a required field in order for ResolveObjectID to function. ++ Serializer serializer.Serializer ++ // AllowUnrecognized controls whether this implementation allows recognizing ++ // GVK combinations not known to the underlying Scheme. 
Default: false ++ AllowUnrecognized bool ++} ++ ++func (r *SerializerObjectRecognizer) ResolveObjectID(_ context.Context, _ string, content []byte) (ObjectID, error) { ++ if r.Serializer == nil { ++ return nil, errors.New("programmer error: SerializerObjectRecognizer.Serializer is nil") ++ } ++ metaObj := &metav1.PartialObjectMetadata{} ++ err := r.Serializer.Decoder().DecodeInto( ++ serializer.NewSingleFrameReader(content, serializer.ContentTypeYAML), ++ metaObj, ++ ) ++ if err != nil { ++ return nil, err ++ } ++ // Validate the object info ++ gvk := metaObj.GroupVersionKind() ++ if gvk.Group == "" && gvk.Version == "" { ++ return nil, fmt.Errorf(".apiVersion field must not be empty") ++ } ++ if gvk.Kind == "" { ++ return nil, fmt.Errorf(".kind field must not be empty") ++ } ++ if metaObj.Kind == "" { ++ return nil, fmt.Errorf(".metadata.name field must not be empty") ++ } ++ if !r.AllowUnrecognized && !r.Serializer.Scheme().Recognizes(gvk) { ++ return nil, fmt.Errorf("GroupVersionKind %v not recognized by the scheme", gvk) ++ } ++ ++ return NewObjectID(metaObj.GroupVersionKind(), ObjectKeyFromObject(metaObj)), nil ++} +diff --git a/pkg/storage/core/versionref.go b/pkg/storage/core/versionref.go +new file mode 100644 +index 0000000..c9b3892 +--- /dev/null ++++ b/pkg/storage/core/versionref.go +@@ -0,0 +1,80 @@ ++package core ++ ++import ( ++ "context" ++ "errors" ++) ++ ++var versionRefKey = versionRefKeyImpl{} ++ ++type versionRefKeyImpl struct{} ++ ++// WithVersionRef attaches the given VersionRef to a Context (it ++// overwrites if one already exists in ctx). The key for the ref ++// is private in this package, so one must use this function to ++// register it. ++func WithVersionRef(ctx context.Context, ref VersionRef) context.Context { ++ return context.WithValue(ctx, versionRefKey, ref) ++} ++ ++// GetVersionRef returns the VersionRef attached to this context. 
++// If there is no attached VersionRef, or it is nil, a BranchRef ++// with branch "" will be returned as the "zero value" of VersionRef. ++func GetVersionRef(ctx context.Context) VersionRef { ++ r, ok := ctx.Value(versionRefKey).(VersionRef) ++ // Return default ref if none specified ++ if r == nil || !ok { ++ return NewBranchRef("") ++ } ++ return r ++} ++ ++var ErrInvalidVersionRefType = errors.New("invalid version ref type") ++ ++// NewBranchRef creates a new VersionRef for a given branch. It is ++// valid for the branch to be ""; in this case it means the "zero ++// value", or unspecified branch to be more precise, where the caller ++// can choose how to handle. ++func NewBranchRef(branch string) VersionRef { return branchRef{branch} } ++ ++// NewCommitRef creates a new VersionRef for the given commit. The ++// commit must uniquely define a certain revision precisely. It must ++// not be an empty string. ++func NewCommitRef(commit string) (VersionRef, error) { ++ if len(commit) == 0 { ++ return nil, errors.New("commit must not be an empty string") ++ } ++ return commitRef{commit}, nil ++} ++ ++// MustNewCommitRef runs NewCommitRef, but panics on errors ++func MustNewCommitRef(commit string) VersionRef { ++ ref, err := NewCommitRef(commit) ++ if err != nil { ++ panic(err) ++ } ++ return ref ++} ++ ++type branchRef struct{ branch string } ++ ++func (r branchRef) String() string { return r.branch } ++ ++// A branch is considered writable, as commits can be added to it by libgitops ++func (branchRef) IsWritable() bool { return true } ++ ++// A branch is considered the zero value if the branch is an empty string, ++// which it is e.g. when there was no VersionRef associated with a Context. ++func (r branchRef) IsZeroValue() bool { return r.branch == "" } ++ ++type commitRef struct{ commit string } ++ ++func (r commitRef) String() string { return r.commit } ++ ++// A commit is not considered writable, as it is only a read snapshot of ++// a specific point in time. 
++func (commitRef) IsWritable() bool { return false } ++ ++// IsZeroValue should always return false for commits; as commit is mandatory ++// to be a non-empty string. ++func (r commitRef) IsZeroValue() bool { return r.commit == "" } +diff --git a/pkg/storage/event/event.go b/pkg/storage/event/event.go +new file mode 100644 +index 0000000..3f57fdb +--- /dev/null ++++ b/pkg/storage/event/event.go +@@ -0,0 +1,48 @@ ++package event ++ ++import ( ++ "fmt" ++ ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++) ++ ++// ObjectEventType is an enum describing a change in an Object's state. ++type ObjectEventType byte ++ ++var _ fmt.Stringer = ObjectEventType(0) ++ ++const ( ++ ObjectEventNone ObjectEventType = iota // 0 ++ ObjectEventCreate // 1 ++ ObjectEventUpdate // 2 ++ ObjectEventDelete // 3 ++ ObjectEventSync // 4 ++) ++ ++func (o ObjectEventType) String() string { ++ switch o { ++ case 0: ++ return "NONE" ++ case 1: ++ return "CREATE" ++ case 2: ++ return "UPDATE" ++ case 3: ++ return "DELETE" ++ case 4: ++ return "SYNC" ++ } ++ ++ // Should never happen ++ return "UNKNOWN" ++} ++ ++// ObjectEvent describes a change that has been observed ++// for the given object with the given ID. ++type ObjectEvent struct { ++ ID core.UnversionedObjectID ++ Type ObjectEventType ++} ++ ++// ObjectEventStream is a channel of ObjectEvents ++type ObjectEventStream chan *ObjectEvent +diff --git a/pkg/storage/event/interfaces.go b/pkg/storage/event/interfaces.go +new file mode 100644 +index 0000000..b13c186 +--- /dev/null ++++ b/pkg/storage/event/interfaces.go +@@ -0,0 +1,31 @@ ++package event ++ ++import ( ++ "context" ++ "io" ++ ++ "github.com/weaveworks/libgitops/pkg/storage" ++) ++ ++// StorageCommon contains the methods that EventStorage adds to the ++// to the normal Storage. ++type StorageCommon interface { ++ // WatchForObjectEvents starts feeding ObjectEvents into the given "into" ++ // channel. 
The caller is responsible for setting a channel buffering ++ // limit large enough to not block normal operation. An error might ++ // be returned if a maximum amount of watches has been opened already, ++ // e.g. ErrTooManyWatches. ++ WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error ++ ++ // Close closes the EventStorage and underlying resources gracefully. ++ io.Closer ++} ++ ++// EventStorage is the abstract combination of a normal Storage, and ++// a possiblility to listen for changes to objects as they change. ++// TODO: Maybe we could use some of controller-runtime's built-in functionality ++// for watching for changes? ++type EventStorage interface { ++ storage.Storage ++ StorageCommon ++} +diff --git a/pkg/storage/filesystem/dir_traversal.go b/pkg/storage/filesystem/dir_traversal.go +new file mode 100644 +index 0000000..12284d7 +--- /dev/null ++++ b/pkg/storage/filesystem/dir_traversal.go +@@ -0,0 +1,37 @@ ++package filesystem ++ ++import ( ++ "context" ++ "os" ++) ++ ++// ListValidFilesInFilesystem discovers files in the given Filesystem that has a ++// ContentType that contentTyper recognizes, and is not a path that is excluded by ++// pathExcluder. ++func ListValidFilesInFilesystem(ctx context.Context, fs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder) (files []string, err error) { ++ err = fs.Walk(ctx, "", func(path string, info os.FileInfo, err error) error { ++ if err != nil { ++ return err ++ } ++ ++ // Only include valid files ++ if !info.IsDir() && IsValidFileInFilesystem(ctx, fs, contentTyper, pathExcluder, path) { ++ files = append(files, path) ++ } ++ return nil ++ }) ++ return ++} ++ ++// IsValidFileInFilesystem checks if file (a relative path) has a ContentType ++// that contentTyper recognizes, and is not a path that is excluded by pathExcluder. 
++func IsValidFileInFilesystem(ctx context.Context, fs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder, file string) bool { ++ // return false if this path should be excluded ++ if pathExcluder.ShouldExcludePath(file) { ++ return false ++ } ++ ++ // If the content type is valid for this path, err == nil => return true ++ _, err := contentTyper.ContentTypeForPath(ctx, fs, file) ++ return err == nil ++} +diff --git a/pkg/storage/filesystem/fileevents/events.go b/pkg/storage/filesystem/fileevents/events.go +new file mode 100644 +index 0000000..38c385a +--- /dev/null ++++ b/pkg/storage/filesystem/fileevents/events.go +@@ -0,0 +1,36 @@ ++package fileevents ++ ++// FileEventType is an enum describing a change in a file's state ++type FileEventType byte ++ ++const ( ++ FileEventNone FileEventType = iota // 0 ++ FileEventModify // 1 ++ FileEventDelete // 2 ++ FileEventMove // 3 ++) ++ ++func (e FileEventType) String() string { ++ switch e { ++ case 0: ++ return "NONE" ++ case 1: ++ return "MODIFY" ++ case 2: ++ return "DELETE" ++ case 3: ++ return "MOVE" ++ } ++ ++ return "UNKNOWN" ++} ++ ++// FileEvent describes a file change of a certain kind at a certain ++// (relative) path. Often emitted by FileEventsEmitter. 
++type FileEvent struct { ++ Path string ++ Type FileEventType ++} ++ ++// FileEventStream is a channel of FileEvents ++type FileEventStream chan *FileEvent +diff --git a/pkg/util/watcher/filewatcher.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go +similarity index 52% +rename from pkg/util/watcher/filewatcher.go +rename to pkg/storage/filesystem/fileevents/inotify/filewatcher.go +index 67db335..58d8518 100644 +--- a/pkg/util/watcher/filewatcher.go ++++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher.go +@@ -1,46 +1,26 @@ +-package watcher ++package inotify + + import ( ++ "context" + "fmt" +- "path" ++ "path/filepath" ++ gosync "sync" + "time" + + "github.com/rjeczalik/notify" ++ "github.com/sirupsen/logrus" + log "github.com/sirupsen/logrus" ++ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents" + "github.com/weaveworks/libgitops/pkg/util/sync" + "golang.org/x/sys/unix" ++ "k8s.io/apimachinery/pkg/util/sets" + ) + +-const eventBuffer = 4096 // How many events and updates we can buffer before watching is interrupted + var listenEvents = []notify.Event{notify.InDelete, notify.InCloseWrite, notify.InMovedFrom, notify.InMovedTo} + +-var eventMap = map[notify.Event]FileEvent{ +- notify.InDelete: FileEventDelete, +- notify.InCloseWrite: FileEventModify, +-} +- +-// combinedEvent describes multiple events that should be concatenated into a single event +-type combinedEvent struct { +- input []notify.Event // input is a slice of events to match (in bytes, it speeds up the comparison) +- output int // output is the event's index that should be returned, negative values equal nil +-} +- +-func (c *combinedEvent) match(events notifyEvents) (notify.EventInfo, bool) { +- if len(c.input) > len(events) { +- return nil, false // Not enough events, cannot match +- } +- +- for i := 0; i < len(c.input); i++ { +- if events[i].Event() != c.input[i] { +- return nil, false +- } +- } +- +- if c.output > 0 { +- return events[c.output], true +- } +- 
+- return nil, true ++var eventMap = map[notify.Event]fileevents.FileEventType{ ++ notify.InDelete: fileevents.FileEventDelete, ++ notify.InCloseWrite: fileevents.FileEventModify, + } + + // combinedEvents describes the event combinations to concatenate, +@@ -54,99 +34,113 @@ var combinedEvents = []combinedEvent{ + + type notifyEvents []notify.EventInfo + type eventStream chan notify.EventInfo +-type FileUpdateStream chan *FileUpdate +- +-// Options specifies options for the FileWatcher +-type Options struct { +- // ExcludeDirs specifies what directories to not watch +- ExcludeDirs []string +- // BatchTimeout specifies the duration to wait after last event before dispatching grouped inotify events +- BatchTimeout time.Duration +- // ValidExtensions specifies what file extensions to look at +- ValidExtensions []string +-} + +-// DefaultOptions returns the default options +-func DefaultOptions() Options { +- return Options{ +- ExcludeDirs: []string{".git"}, +- BatchTimeout: 1 * time.Second, +- ValidExtensions: []string{".yaml", ".yml", ".json"}, +- } +-} ++// FileEvents is a slice of FileEvent pointers ++type FileEvents []*fileevents.FileEvent + + // NewFileWatcher returns a list of files in the watched directory in + // addition to the generated FileWatcher, it can be used to populate + // MappedRawStorage fileMappings +-func NewFileWatcher(dir string) (w *FileWatcher, files []string, err error) { +- return NewFileWatcherWithOptions(dir, DefaultOptions()) +-} ++func NewFileWatcher(dir string, opts ...FileWatcherOption) (fileevents.Emitter, error) { ++ o := defaultOptions().ApplyOptions(opts) + +-// NewFileWatcher returns a list of files in the watched directory in +-// addition to the generated FileWatcher, it can be used to populate +-// MappedRawStorage fileMappings +-func NewFileWatcherWithOptions(dir string, opts Options) (w *FileWatcher, files []string, err error) { +- w = &FileWatcher{ +- dir: dir, +- events: make(eventStream, eventBuffer), +- updates: 
make(FileUpdateStream, eventBuffer), +- batcher: sync.NewBatchWriter(opts.BatchTimeout), +- opts: opts, ++ w := &FileWatcher{ ++ dir: dir, ++ ++ inbound: make(eventStream, int(o.EventBufferSize)), ++ // outbound is set by WatchForFileEvents ++ outboundMu: &gosync.Mutex{}, ++ ++ suspendFiles: sets.NewString(), ++ suspendFilesMu: &gosync.Mutex{}, ++ ++ // monitor and dispatcher set by WatchForFileEvents, guarded by outboundMu ++ ++ opts: *o, ++ ++ batcher: sync.NewBatchWriter(o.BatchTimeout), + } + + log.Tracef("FileWatcher: Starting recursive watch for %q", dir) +- if err = notify.Watch(path.Join(dir, "..."), w.events, listenEvents...); err != nil { +- notify.Stop(w.events) +- } else if files, err = w.getFiles(); err == nil { +- w.monitor = sync.RunMonitor(w.monitorFunc) +- w.dispatcher = sync.RunMonitor(w.dispatchFunc) ++ if err := notify.Watch(filepath.Join(dir, "..."), w.inbound, listenEvents...); err != nil { ++ notify.Stop(w.inbound) ++ return nil, err + } + +- return ++ return w, nil + } + ++var _ fileevents.Emitter = &FileWatcher{} ++ + // FileWatcher recursively monitors changes in files in the given directory + // and sends out events based on their state changes. Only files conforming + // to validSuffix are monitored. The FileWatcher can be suspended for a single + // event at a time to eliminate updates by WatchStorage causing a loop. + type FileWatcher struct { +- dir string +- events eventStream +- updates FileUpdateStream +- suspendEvent FileEvent +- monitor *sync.Monitor +- dispatcher *sync.Monitor +- opts Options ++ dir string ++ // channels ++ inbound eventStream ++ outbound fileevents.FileEventStream ++ outboundMu *gosync.Mutex ++ // new suspend logic ++ suspendFiles sets.String ++ suspendFilesMu *gosync.Mutex ++ // goroutines ++ monitor *sync.Monitor ++ dispatcher *sync.Monitor ++ ++ // opts ++ opts FileWatcherOptions + // the batcher is used for properly sending many concurrent inotify events + // as a group, after a specified timeout. 
This fixes the issue of one single + // file operation being registered as many different inotify events + batcher *sync.BatchWriter + } + +-func (w *FileWatcher) monitorFunc() { ++func (w *FileWatcher) WatchForFileEvents(ctx context.Context, into fileevents.FileEventStream) error { ++ w.outboundMu.Lock() ++ defer w.outboundMu.Unlock() ++ // We don't support more than one listener ++ // TODO: maybe support many listeners in the future? ++ if w.outbound != nil { ++ return fmt.Errorf("FileWatcher: not more than one watch supported: %w", fileevents.ErrTooManyWatches) ++ } ++ w.outbound = into ++ // Start the backing goroutines ++ w.monitor = sync.RunMonitor(w.monitorFunc) ++ w.dispatcher = sync.RunMonitor(w.dispatchFunc) ++ return nil // all ok ++} ++ ++func (w *FileWatcher) monitorFunc() error { + log.Debug("FileWatcher: Monitoring thread started") + defer log.Debug("FileWatcher: Monitoring thread stopped") +- defer close(w.updates) // Close the update stream after the FileWatcher has stopped ++ defer close(w.outbound) // Close the update stream after the FileWatcher has stopped + + for { +- event, ok := <-w.events ++ event, ok := <-w.inbound + if !ok { +- return ++ logrus.Debug("FileWatcher: Got non-ok channel recieve from w.inbound, exiting monitorFunc") ++ return nil + } + + if ievent(event).Mask&unix.IN_ISDIR != 0 { + continue // Skip directories + } + +- if !w.validFile(event.Path()) { +- continue // Skip invalid files ++ // Get the relative path between the root directory and the changed file ++ // Note: This is just used for the PathExcluder, absolute paths are used ++ // in the underlying file-change computation system, until in sendUpdate ++ // where they are converted into relative paths before sending to the listener. 
++ relativePath, err := filepath.Rel(w.dir, event.Path()) ++ if err != nil { ++ logrus.Errorf("FileWatcher: Error occurred when computing relative path between: %s and %s: %v", w.dir, event.Path(), err) ++ continue + } + +- updateEvent := convertEvent(event.Event()) +- if w.suspendEvent > 0 && updateEvent == w.suspendEvent { +- w.suspendEvent = 0 +- log.Debugf("FileWatcher: Skipping suspended event %s for path: %q", updateEvent, event.Path()) +- continue // Skip the suspended event ++ // The PathExcluder only operates on relative paths. ++ if w.opts.PathExcluder.ShouldExcludePath(relativePath) { ++ continue // Skip ignored files + } + + // Get any events registered for the specific file, and append the specified event +@@ -158,18 +152,20 @@ func (w *FileWatcher) monitorFunc() { + eventList = append(eventList, event) + + // Register the event in the map, and dispatch all the events at once after the timeout ++ // Note that event.Path() is just the unique key for the map here, it is not actually ++ // used later when computing the changes of the filesystem. 
+ w.batcher.Store(event.Path(), eventList) + log.Debugf("FileWatcher: Registered inotify events %v for path %q", eventList, event.Path()) + } + } + +-func (w *FileWatcher) dispatchFunc() { ++func (w *FileWatcher) dispatchFunc() error { + log.Debug("FileWatcher: Dispatch thread started") + defer log.Debug("FileWatcher: Dispatch thread stopped") + + for { + // Wait until we have a batch dispatched to us +- ok := w.batcher.ProcessBatch(func(key, val interface{}) bool { ++ ok := w.batcher.ProcessBatch(func(_, val interface{}) bool { + // Concatenate all known events, and dispatch them to be handled one by one + for _, event := range w.concatenateEvents(val.(notifyEvents)) { + w.sendUpdate(event) +@@ -179,56 +175,85 @@ func (w *FileWatcher) dispatchFunc() { + return true + }) + if !ok { +- return // The BatchWriter channel is closed, stop processing ++ logrus.Debug("FileWatcher: Got non-ok channel recieve from w.batcher, exiting dispatchFunc") ++ return nil // The BatchWriter channel is closed, stop processing + } + + log.Debug("FileWatcher: Dispatched events batch and reset the events cache") + } + } + +-func (w *FileWatcher) sendUpdate(update *FileUpdate) { +- log.Debugf("FileWatcher: Sending update: %s -> %q", update.Event, update.Path) +- w.updates <- update +-} ++func (w *FileWatcher) sendUpdate(event *fileevents.FileEvent) { ++ // Get the relative path between the root directory and the changed file ++ relativePath, err := filepath.Rel(w.dir, event.Path) ++ if err != nil { ++ logrus.Errorf("FileWatcher: Error occurred when computing relative path between: %s and %s: %v", w.dir, event.Path, err) ++ return ++ } ++ // Replace the full path with the relative path for the signaling upstream ++ event.Path = relativePath + +-// GetFileUpdateStream gets the channel with FileUpdates +-func (w *FileWatcher) GetFileUpdateStream() FileUpdateStream { +- return w.updates ++ if w.shouldSuspendEvent(event.Path) { ++ log.Debugf("FileWatcher: Skipping suspended event %s for path: 
%q", event.Type, event.Path) ++ return // Skip the suspended event ++ } ++ ++ log.Debugf("FileWatcher: Sending update: %s -> %q", event.Type, event.Path) ++ w.outbound <- event + } + + // Close closes active underlying resources +-func (w *FileWatcher) Close() { +- notify.Stop(w.events) ++func (w *FileWatcher) Close() error { ++ notify.Stop(w.inbound) + w.batcher.Close() +- close(w.events) // Close the event stream +- w.monitor.Wait() +- w.dispatcher.Wait() ++ close(w.inbound) // Close the inbound event stream ++ // No need to check the error here, as we only return nil above ++ _ = w.monitor.Wait() ++ _ = w.dispatcher.Wait() ++ return nil + } + +-// Suspend enables a one-time suspend of the given event, +-// the FileWatcher will skip the given event once +-func (w *FileWatcher) Suspend(updateEvent FileEvent) { +- w.suspendEvent = updateEvent ++// Suspend enables a one-time suspend of the given path ++// TODO: clarify how the path should be formatted ++func (w *FileWatcher) Suspend(_ context.Context, path string) { ++ w.suspendFilesMu.Lock() ++ defer w.suspendFilesMu.Unlock() ++ w.suspendFiles.Insert(path) + } + +-func convertEvent(event notify.Event) FileEvent { ++// shouldSuspendEvent checks if an event for the given path ++// should be suspended for one time. If it should, true will ++// be returned, and the mapping will be removed next time. 
++func (w *FileWatcher) shouldSuspendEvent(path string) bool { ++ w.suspendFilesMu.Lock() ++ defer w.suspendFilesMu.Unlock() ++ // If the path should not be suspended, just return false and be done ++ if !w.suspendFiles.Has(path) { ++ return false ++ } ++ // Otherwise, remove it from the list and mark it as suspended ++ w.suspendFiles.Delete(path) ++ return true ++} ++ ++func convertEvent(event notify.Event) fileevents.FileEventType { + if updateEvent, ok := eventMap[event]; ok { + return updateEvent + } + +- return FileEventNone ++ return fileevents.FileEventNone + } + +-func convertUpdate(event notify.EventInfo) *FileUpdate { ++func convertUpdate(event notify.EventInfo) *fileevents.FileEvent { + fileEvent := convertEvent(event.Event()) +- if fileEvent == FileEventNone { ++ if fileEvent == fileevents.FileEventNone { + // This should never happen + panic(fmt.Sprintf("invalid event for update conversion: %q", event.Event().String())) + } + +- return &FileUpdate{ +- Event: fileEvent, +- Path: event.Path(), ++ return &fileevents.FileEvent{ ++ Path: event.Path(), ++ Type: fileEvent, + } + } + +@@ -247,7 +272,7 @@ func (w *FileWatcher) newMoveCache(event notify.EventInfo) *moveCache { + } + + // moveCaches wait one second to be cancelled before firing +- m.timer = time.AfterFunc(time.Second, m.incomplete) ++ m.timer = time.AfterFunc(w.opts.BatchTimeout, m.incomplete) + return m + } + +@@ -260,42 +285,53 @@ func (m *moveCache) cookie() uint32 { + // if only one is received, the file is moved in/out of a watched directory, which + // is treated as a normal creation/deletion by this method. 
+ func (m *moveCache) incomplete() { +- var event FileEvent ++ var evType fileevents.FileEventType + + switch m.event.Event() { + case notify.InMovedFrom: +- event = FileEventDelete ++ evType = fileevents.FileEventDelete + case notify.InMovedTo: +- event = FileEventModify ++ evType = fileevents.FileEventModify + default: + // This should never happen + panic(fmt.Sprintf("moveCache: unrecognized event: %v", m.event.Event())) + } + + log.Tracef("moveCache: Timer expired for %d, dispatching...", m.cookie()) +- m.watcher.sendUpdate(&FileUpdate{event, m.event.Path()}) ++ m.watcher.sendUpdate(&fileevents.FileEvent{Path: m.event.Path(), Type: evType}) + + // Delete the cache after the timer has fired ++ moveCachesMu.Lock() + delete(moveCaches, m.cookie()) ++ moveCachesMu.Unlock() + } + + func (m *moveCache) cancel() { + m.timer.Stop() ++ moveCachesMu.Lock() + delete(moveCaches, m.cookie()) ++ moveCachesMu.Unlock() + log.Tracef("moveCache: Dispatching cancelled for %d", m.cookie()) + } + +-// moveCaches keeps track of active moves by cookie +-var moveCaches = make(map[uint32]*moveCache) ++var ( ++ // moveCaches keeps track of active moves by cookie ++ moveCaches = make(map[uint32]*moveCache) ++ moveCachesMu = &gosync.RWMutex{} ++) + + // move processes InMovedFrom and InMovedTo events in any order + // and dispatches FileUpdates when a move is detected +-func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *FileUpdate) { ++func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *fileevents.FileEvent) { + cookie := ievent(event).Cookie ++ moveCachesMu.RLock() + cache, ok := moveCaches[cookie] ++ moveCachesMu.RUnlock() + if !ok { + // The cookie is not cached, create a new cache object for it ++ moveCachesMu.Lock() + moveCaches[cookie] = w.newMoveCache(event) ++ moveCachesMu.Unlock() + return + } + +@@ -305,8 +341,8 @@ func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *FileUpdate) { + sourcePath, destPath = destPath, sourcePath + fallthrough + 
case notify.InMovedTo: +- cache.cancel() // Cancel dispatching the cache's incomplete move +- moveUpdate = &FileUpdate{FileEventMove, destPath} // Register an internal, complete move instead ++ cache.cancel() // Cancel dispatching the cache's incomplete move ++ moveUpdate = &fileevents.FileEvent{Path: destPath, Type: fileevents.FileEventMove} // Register an internal, complete move instead + log.Tracef("FileWatcher: Detected move: %q -> %q", sourcePath, destPath) + } + +@@ -315,8 +351,8 @@ func (w *FileWatcher) move(event notify.EventInfo) (moveUpdate *FileUpdate) { + + // concatenateEvents takes in a slice of events and concatenates + // all events possible based on combinedEvents. It also manages +-// file moving and conversion from notifyEvents to FileUpdates +-func (w *FileWatcher) concatenateEvents(events notifyEvents) FileUpdates { ++// file moving and conversion from notifyEvents to FileEvents ++func (w *FileWatcher) concatenateEvents(events notifyEvents) FileEvents { + for _, combinedEvent := range combinedEvents { + // Test if the prefix of the given events matches combinedEvent.input + if event, ok := combinedEvent.match(events); ok { +@@ -332,7 +368,7 @@ func (w *FileWatcher) concatenateEvents(events notifyEvents) FileUpdates { + } + + // Convert the events to updates +- updates := make(FileUpdates, 0, len(events)) ++ updates := make(FileEvents, 0, len(events)) + for _, event := range events { + switch event.Event() { + case notify.InMovedFrom, notify.InMovedTo: +@@ -352,3 +388,27 @@ func (w *FileWatcher) concatenateEvents(events notifyEvents) FileUpdates { + func ievent(event notify.EventInfo) *unix.InotifyEvent { + return event.Sys().(*unix.InotifyEvent) + } ++ ++// combinedEvent describes multiple events that should be concatenated into a single event ++type combinedEvent struct { ++ input []notify.Event // input is a slice of events to match (in bytes, it speeds up the comparison) ++ output int // output is the event's index that should be returned, 
negative values equal nil ++} ++ ++func (c *combinedEvent) match(events notifyEvents) (notify.EventInfo, bool) { ++ if len(c.input) > len(events) { ++ return nil, false // Not enough events, cannot match ++ } ++ ++ for i := 0; i < len(c.input); i++ { ++ if events[i].Event() != c.input[i] { ++ return nil, false ++ } ++ } ++ ++ if c.output > 0 { ++ return events[c.output], true ++ } ++ ++ return nil, true ++} +diff --git a/pkg/util/watcher/filewatcher_test.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go +similarity index 60% +rename from pkg/util/watcher/filewatcher_test.go +rename to pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go +index b80f9b2..c423f24 100644 +--- a/pkg/util/watcher/filewatcher_test.go ++++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go +@@ -1,9 +1,12 @@ +-package watcher ++package inotify + + import ( ++ "fmt" ++ "strings" + "testing" + + "github.com/rjeczalik/notify" ++ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents" + "golang.org/x/sys/unix" + ) + +@@ -51,33 +54,33 @@ var testEvents = []notifyEvents{ + }, + } + +-var targets = []FileEvents{ ++var targets = []FileEventTypes{ + { +- FileEventModify, ++ fileevents.FileEventModify, + }, + { +- FileEventDelete, ++ fileevents.FileEventDelete, + }, + { +- FileEventModify, +- FileEventMove, +- FileEventDelete, ++ fileevents.FileEventModify, ++ fileevents.FileEventMove, ++ fileevents.FileEventDelete, + }, + { +- FileEventModify, ++ fileevents.FileEventModify, + }, + {}, + } + +-func extractEvents(updates FileUpdates) (events FileEvents) { +- for _, update := range updates { +- events = append(events, update.Event) ++func extractEventTypes(events FileEvents) (eventTypes FileEventTypes) { ++ for _, event := range events { ++ eventTypes = append(eventTypes, event.Type) + } + + return + } + +-func eventsEqual(a, b FileEvents) bool { ++func eventsEqual(a, b FileEventTypes) bool { + if len(a) != len(b) { + return false + } +@@ -91,9 +94,23 
@@ func eventsEqual(a, b FileEvents) bool { + return true + } + ++// FileEventTypes is a slice of FileEventType ++type FileEventTypes []fileevents.FileEventType ++ ++var _ fmt.Stringer = FileEventTypes{} ++ ++func (e FileEventTypes) String() string { ++ strs := make([]string, 0, len(e)) ++ for _, ev := range e { ++ strs = append(strs, ev.String()) ++ } ++ ++ return strings.Join(strs, ",") ++} ++ + func TestEventConcatenation(t *testing.T) { + for i, e := range testEvents { +- result := extractEvents((&FileWatcher{}).concatenateEvents(e)) ++ result := extractEventTypes((&FileWatcher{}).concatenateEvents(e)) + if !eventsEqual(result, targets[i]) { + t.Errorf("wrong concatenation result: %v != %v", result, targets[i]) + } +diff --git a/pkg/storage/filesystem/fileevents/inotify/options.go b/pkg/storage/filesystem/fileevents/inotify/options.go +new file mode 100644 +index 0000000..2c48e5d +--- /dev/null ++++ b/pkg/storage/filesystem/fileevents/inotify/options.go +@@ -0,0 +1,59 @@ ++package inotify ++ ++import ( ++ "time" ++ ++ "github.com/weaveworks/libgitops/pkg/storage/filesystem" ++) ++ ++// How many inotify events we can buffer before watching is interrupted ++const DefaultEventBufferSize int32 = 4096 ++ ++type FileWatcherOption interface { ++ ApplyToFileWatcher(*FileWatcherOptions) ++} ++ ++var _ FileWatcherOption = &FileWatcherOptions{} ++ ++// FileWatcherOptions specifies options for the FileWatcher ++type FileWatcherOptions struct { ++ // BatchTimeout specifies the duration to wait after last event ++ // before dispatching grouped inotify events ++ // Default: 1s ++ BatchTimeout time.Duration ++ // EventBufferSize describes how many inotify events can be buffered ++ // before watching is interrupted/delayed. ++ // Default: DefaultEventBufferSize ++ EventBufferSize int32 ++ // PathExcluder provides a way to exclude paths. 
++ // Default: filesystem.DefaultPathExcluders() ++ PathExcluder filesystem.PathExcluder ++} ++ ++func (o *FileWatcherOptions) ApplyToFileWatcher(target *FileWatcherOptions) { ++ if o.BatchTimeout != 0 { ++ target.BatchTimeout = o.BatchTimeout ++ } ++ if o.EventBufferSize != 0 { ++ target.EventBufferSize = o.EventBufferSize ++ } ++ if o.PathExcluder != nil { ++ target.PathExcluder = o.PathExcluder ++ } ++} ++ ++func (o *FileWatcherOptions) ApplyOptions(opts []FileWatcherOption) *FileWatcherOptions { ++ for _, opt := range opts { ++ opt.ApplyToFileWatcher(o) ++ } ++ return o ++} ++ ++// defaultOptions returns the default options ++func defaultOptions() *FileWatcherOptions { ++ return &FileWatcherOptions{ ++ BatchTimeout: 1 * time.Second, ++ EventBufferSize: DefaultEventBufferSize, ++ PathExcluder: filesystem.DefaultPathExcluders(), ++ } ++} +diff --git a/pkg/storage/filesystem/fileevents/interfaces.go b/pkg/storage/filesystem/fileevents/interfaces.go +new file mode 100644 +index 0000000..77d7708 +--- /dev/null ++++ b/pkg/storage/filesystem/fileevents/interfaces.go +@@ -0,0 +1,57 @@ ++package fileevents ++ ++import ( ++ "context" ++ "errors" ++ "io" ++ ++ "github.com/weaveworks/libgitops/pkg/storage/event" ++ "github.com/weaveworks/libgitops/pkg/storage/filesystem" ++) ++ ++var ( ++ // ErrTooManyWatches can happen when trying to register too many ++ // watching reciever channels to an event emitter. ++ ErrTooManyWatches = errors.New("too many watches already opened") ++) ++ ++// Emitter is an interface that provides high-level inotify-like ++// behaviour to consumers. It can be used e.g. by even higher-level ++// interfaces like FilesystemEventStorage. ++type Emitter interface { ++ // WatchForFileEvents starts feeding FileEvents into the given "into" ++ // channel. The caller is responsible for setting a channel buffering ++ // limit large enough to not block normal operation. 
An error might ++ // be returned if a maximum amount of watches has been opened already, ++ // e.g. ErrTooManyWatches. ++ // ++ // Note that it is the receiver's responsibility to "validate" the ++ // file so it matches any user defined policy (e.g. only specific ++ // content types, or a PathExcluder has been given). ++ WatchForFileEvents(ctx context.Context, into FileEventStream) error ++ ++ // Suspend blocks the next event dispatch for this given path. Useful ++ // for not sending "your own" modification events into the ++ // FileEventStream that is listening. path is relative. ++ Suspend(ctx context.Context, path string) ++ ++ // Close closes the emitter gracefully. ++ io.Closer ++} ++ ++// StorageCommon is an extension to event.StorageCommon that ++// also contains an underlying Emitter. This is meant to be ++// used in tandem with filesystem.Storages. ++type StorageCommon interface { ++ event.StorageCommon ++ ++ // FileEventsEmitter gets the Emitter used internally. ++ FileEventsEmitter() Emitter ++} ++ ++// FilesystemEventStorage is the combination of a filesystem.Storage, ++// and the possibility to listen for object updates from a Emitter. ++type FilesystemEventStorage interface { ++ filesystem.Storage ++ StorageCommon ++} +diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go +new file mode 100644 +index 0000000..e0e6940 +--- /dev/null ++++ b/pkg/storage/filesystem/filefinder_simple.go +@@ -0,0 +1,235 @@ ++package filesystem ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ "os" ++ "path/filepath" ++ "strings" ++ ++ "github.com/weaveworks/libgitops/pkg/serializer" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ "k8s.io/apimachinery/pkg/util/sets" ++) ++ ++// NewSimpleStorage is a default opinionated constructor for a Storage ++// using SimpleFileFinder as the FileFinder, and the local disk as target. 
++// If you need more advanced customizability than provided here, you can compose ++// the call to filesystem.NewGeneric yourself. ++func NewSimpleStorage(dir string, namespacer core.Namespacer, opts SimpleFileFinderOptions) (Storage, error) { ++ fs := NewOSFilesystem(dir) ++ fileFinder, err := NewSimpleFileFinder(fs, opts) ++ if err != nil { ++ return nil, err ++ } ++ // fileFinder and namespacer are validated by filesystem.NewGeneric. ++ return NewGeneric(fileFinder, namespacer) ++} ++ ++func NewSimpleFileFinder(fs Filesystem, opts SimpleFileFinderOptions) (*SimpleFileFinder, error) { ++ if fs == nil { ++ return nil, fmt.Errorf("NewSimpleFileFinder: fs is mandatory") ++ } ++ ct := serializer.ContentTypeJSON ++ if len(opts.ContentType) != 0 { ++ ct = opts.ContentType ++ } ++ resolver := DefaultFileExtensionResolver ++ if opts.FileExtensionResolver != nil { ++ resolver = opts.FileExtensionResolver ++ } ++ return &SimpleFileFinder{ ++ fs: fs, ++ opts: opts, ++ contentTyper: StaticContentTyper{ContentType: ct}, ++ resolver: resolver, ++ }, nil ++} ++ ++// isObjectIDNamespaced returns true if the ID is of a namespaced GroupKind, and ++// false if the GroupKind is non-namespaced. NOTE: This ONLY works for FileFinders ++// where the Storage has made sure that the namespacing conventions are followed. ++func isObjectIDNamespaced(id core.UnversionedObjectID) bool { ++ return id.ObjectKey().Namespace != "" ++} ++ ++var _ FileFinder = &SimpleFileFinder{} ++ ++// SimpleFileFinder is a FileFinder-compliant implementation that ++// stores Objects on disk using a straightforward directory layout. ++// ++// The following directory layout is used: ++// if DisableGroupDirectory == false && SubDirectoryFileName == "" { ++// ////. if namespaced or ++// ///. if non-namespaced ++// } ++// else if DisableGroupDirectory == false && SubDirectoryFileName == "foo" { ++// /////foo. if namespaced or ++// ////foo. 
if non-namespaced ++// } ++// else if DisableGroupDirectory == true && SubDirectoryFileName == "" { ++// ///. if namespaced or ++// //. if non-namespaced ++// } ++// else if DisableGroupDirectory == true && SubDirectoryFileName == "foo" { ++// ////foo. if namespaced or ++// ///foo. if non-namespaced ++// } ++// ++// is resolved by the FileExtensionResolver, for the given ContentType. ++// ++// This FileFinder does not support the ObjectAt method. ++type SimpleFileFinder struct { ++ fs Filesystem ++ opts SimpleFileFinderOptions ++ contentTyper StaticContentTyper ++ resolver FileExtensionResolver ++} ++ ++type SimpleFileFinderOptions struct { ++ // Default: false; means enable group directory ++ DisableGroupDirectory bool ++ // Default: ""; means use file names as the means of storage ++ SubDirectoryFileName string ++ // Default: serializer.ContentTypeJSON ++ ContentType serializer.ContentType ++ // Default: DefaultFileExtensionResolver ++ FileExtensionResolver FileExtensionResolver ++} ++ ++// TODO: Use group name "core" if group is "" to support core k8s objects. ++ ++func (f *SimpleFileFinder) Filesystem() Filesystem { ++ return f.fs ++} ++ ++func (f *SimpleFileFinder) ContentTyper() ContentTyper { ++ return f.contentTyper ++} ++ ++// ObjectPath gets the file path relative to the root directory ++func (f *SimpleFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { ++ // // ++ paths := []string{f.kindKeyPath(id.GroupKind())} ++ ++ if isObjectIDNamespaced(id) { ++ // .// ++ paths = append(paths, id.ObjectKey().Namespace) ++ } ++ // Get the file extension ++ ext, err := f.ext() ++ if err != nil { ++ return "", err ++ } ++ if f.opts.SubDirectoryFileName == "" { ++ // ./. ++ paths = append(paths, id.ObjectKey().Name+ext) ++ } else { ++ // .//. 
++ paths = append(paths, id.ObjectKey().Name, f.opts.SubDirectoryFileName+ext) ++ } ++ return filepath.Join(paths...), nil ++} ++ ++func (f *SimpleFileFinder) kindKeyPath(gk core.GroupKind) string { ++ if f.opts.DisableGroupDirectory { ++ // .// ++ return filepath.Join(gk.Kind) ++ } ++ // ./// ++ return filepath.Join(gk.Group, gk.Kind) ++} ++ ++// ObjectAt retrieves the ID containing the virtual path based ++// on the given physical file path. ++func (f *SimpleFileFinder) ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) { ++ return nil, errors.New("not implemented") ++} ++ ++func (f *SimpleFileFinder) ext() (string, error) { ++ return f.resolver.ExtensionForContentType(f.contentTyper.ContentType) ++} ++ ++// ListNamespaces lists the available namespaces for the given GroupKind. ++// This function shall only be called for namespaced objects, it is up to ++// the caller to make sure they do not call this method for root-spaced ++// objects. If any of the given rules are violated, ErrNamespacedMismatch ++// should be returned as a wrapped error. ++// ++// The implementer can choose between basing the answer strictly on e.g. ++// v1.Namespace objects that exist in the system, or just the set of ++// different namespaces that have been set on any object belonging to ++// the given GroupKind. ++func (f *SimpleFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { ++ entries, err := readDir(ctx, f.fs, f.kindKeyPath(gk)) ++ if err != nil { ++ return nil, err ++ } ++ return sets.NewString(entries...), nil ++} ++ ++// ListObjectIDs returns a list of unversioned ObjectIDs. ++// For namespaced GroupKinds, the caller must provide a namespace, and for ++// root-spaced GroupKinds, the caller must not. When namespaced, this function ++// must only return object IDs for that given namespace. If any of the given ++// rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. 
++func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { ++ // If namespace is empty, the names will be in ./, otherwise .// ++ namesDir := filepath.Join(f.kindKeyPath(gk), namespace) ++ entries, err := readDir(ctx, f.fs, namesDir) ++ if err != nil { ++ return nil, err ++ } ++ // Get the file extension ++ ext, err := f.ext() ++ if err != nil { ++ return nil, err ++ } ++ // Map the names to UnversionedObjectIDs ++ ids := make([]core.UnversionedObjectID, 0, len(entries)) ++ for _, entry := range entries { ++ // Loop through all entries, and make sure they are sanitized .metadata.name's ++ if f.opts.SubDirectoryFileName != "" { ++ // If f.SubDirectoryFileName != "", the file names already match .metadata.name ++ // Make sure the metadata file ./<.metadata.name>/. actually exists ++ expectedPath := filepath.Join(namesDir, entry, f.opts.SubDirectoryFileName+ext) ++ if exists, _ := f.fs.Exists(ctx, expectedPath); !exists { ++ continue ++ } ++ } else { ++ // Storage path is ./.. entry is "." ++ // Verify the extension is there and strip it from name. 
If ext isn't there, just continue ++ if !strings.HasSuffix(entry, ext) { ++ continue ++ } ++ // Remove the extension from the name ++ entry = strings.TrimSuffix(entry, ext) ++ } ++ // If we got this far, add the key to the list ++ ids = append(ids, core.NewUnversionedObjectID(gk, core.ObjectKey{Name: entry, Namespace: namespace})) ++ } ++ return ids, nil ++} ++ ++func readDir(ctx context.Context, fs Filesystem, dir string) ([]string, error) { ++ fi, err := fs.Stat(ctx, dir) ++ if os.IsNotExist(err) { ++ // It's ok if the directory doesn't exist (yet), we just don't have any items then :) ++ return nil, nil ++ } else if !fi.IsDir() { ++ // Unexpected, if the directory actually would be a file ++ return nil, fmt.Errorf("expected that %s is a directory", dir) ++ } ++ ++ // When we know that path is a directory, go ahead and read it ++ entries, err := fs.ReadDir(ctx, dir) ++ if err != nil { ++ return nil, err ++ } ++ fileNames := make([]string, 0, len(entries)) ++ for _, entry := range entries { ++ fileNames = append(fileNames, entry.Name()) ++ } ++ return fileNames, nil ++} +diff --git a/pkg/storage/filesystem/filesystem.go b/pkg/storage/filesystem/filesystem.go +new file mode 100644 +index 0000000..f523e7b +--- /dev/null ++++ b/pkg/storage/filesystem/filesystem.go +@@ -0,0 +1,128 @@ ++package filesystem ++ ++import ( ++ "context" ++ "os" ++ "path/filepath" ++ "strconv" ++ ++ "github.com/spf13/afero" ++) ++ ++// Filesystem extends afero.Fs and afero.Afero with contexts added to every method. ++type Filesystem interface { ++ ++ // Members of afero.Fs ++ ++ // MkdirAll creates a directory path and all parents that does not exist ++ // yet. ++ MkdirAll(ctx context.Context, path string, perm os.FileMode) error ++ // Remove removes a file identified by name, returning an error, if any ++ // happens. ++ Remove(ctx context.Context, name string) error ++ // Stat returns a FileInfo describing the named file, or an error, if any ++ // happens. 
++ Stat(ctx context.Context, name string) (os.FileInfo, error) ++ ++ // Members of afero.Afero ++ ++ ReadDir(ctx context.Context, dirname string) ([]os.FileInfo, error) ++ ++ Exists(ctx context.Context, path string) (bool, error) ++ ++ ReadFile(ctx context.Context, filename string) ([]byte, error) ++ ++ WriteFile(ctx context.Context, filename string, data []byte, perm os.FileMode) error ++ ++ Walk(ctx context.Context, root string, walkFn filepath.WalkFunc) error ++ ++ // Custom methods ++ ++ // Checksum returns a checksum of the given file. ++ // ++ // What the checksum is is application-dependent, however, it ++ // should be the same for two invocations, as long as the stored ++ // data is the same. It might change over time although the ++ // underlying data did not. Examples of checksums that can be ++ // used is: the file modification timestamp, a sha256sum of the ++ // file content, or the latest Git commit when the file was ++ // changed. ++ // ++ // os.IsNotExist(err) can be used to check if the file doesn't ++ // exist. ++ Checksum(ctx context.Context, filename string) (string, error) ++ ++ // RootDirectory specifies where on disk the root directory is stored. ++ // This path MUST be absolute. All other paths for the other methods ++ // MUST be relative to this directory. ++ RootDirectory() string ++} ++ ++// NewOSFilesystem creates a new afero.OsFs for the local directory, using ++// NewFilesystem underneath. ++func NewOSFilesystem(rootDir string) Filesystem { ++ return NewFilesystem(afero.NewOsFs(), rootDir) ++} ++ ++// NewFilesystem wraps an underlying afero.Fs without context knowledge, ++// in a Filesystem-compliant implementation; scoped at the given directory ++// (i.e. wrapped in afero.NewBasePathFs(fs, rootDir)). ++// ++// Checksum is calculated based on the modification timestamp of the file. ++func NewFilesystem(fs afero.Fs, rootDir string) Filesystem { ++ // TODO: rootDir validation? It must be absolute, exist, and be a directory. 
++ return &filesystem{afero.NewBasePathFs(fs, rootDir), rootDir} ++} ++ ++type filesystem struct { ++ fs afero.Fs ++ rootDir string ++} ++ ++func (f *filesystem) RootDirectory() string { ++ return f.rootDir ++} ++ ++func (f *filesystem) Checksum(ctx context.Context, filename string) (string, error) { ++ fi, err := f.Stat(ctx, filename) ++ if err != nil { ++ return "", err ++ } ++ return checksumFromFileInfo(fi), nil ++} ++ ++func (f *filesystem) MkdirAll(_ context.Context, path string, perm os.FileMode) error { ++ return f.fs.MkdirAll(path, perm) ++} ++ ++func (f *filesystem) Remove(_ context.Context, name string) error { ++ return f.fs.Remove(name) ++} ++ ++func (f *filesystem) Stat(_ context.Context, name string) (os.FileInfo, error) { ++ return f.fs.Stat(name) ++} ++ ++func (f *filesystem) ReadDir(_ context.Context, dirname string) ([]os.FileInfo, error) { ++ return afero.ReadDir(f.fs, dirname) ++} ++ ++func (f *filesystem) Exists(_ context.Context, path string) (bool, error) { ++ return afero.Exists(f.fs, path) ++} ++ ++func (f *filesystem) ReadFile(_ context.Context, filename string) ([]byte, error) { ++ return afero.ReadFile(f.fs, filename) ++} ++ ++func (f *filesystem) WriteFile(_ context.Context, filename string, data []byte, perm os.FileMode) error { ++ return afero.WriteFile(f.fs, filename, data, perm) ++} ++ ++func (f *filesystem) Walk(_ context.Context, root string, walkFn filepath.WalkFunc) error { ++ return afero.Walk(f.fs, root, walkFn) ++} ++ ++func checksumFromFileInfo(fi os.FileInfo) string { ++ return strconv.FormatInt(fi.ModTime().UnixNano(), 10) ++} +diff --git a/pkg/storage/filesystem/format.go b/pkg/storage/filesystem/format.go +new file mode 100644 +index 0000000..b36aa1c +--- /dev/null ++++ b/pkg/storage/filesystem/format.go +@@ -0,0 +1,92 @@ ++package filesystem ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ "path/filepath" ++ ++ "github.com/weaveworks/libgitops/pkg/serializer" ++) ++ ++var ( ++ ErrCannotDetermineContentType = 
errors.New("cannot determine content type") ++ ErrUnrecognizedContentType = errors.New("unrecognized content type") ++) ++ ++// ContentTyper resolves the Content Type of a file given its path and the afero ++// filesystem abstraction, so that it is possible to even examine the file if needed ++// for making the judgement. See DefaultContentTyper for a sample implementation. ++type ContentTyper interface { ++ // ContentTypeForPath should return the content type for the file that exists in ++ // the given Filesystem (path is relative). If the content type cannot be determined ++ // please return a wrapped ErrCannotDetermineContentType error. ++ ContentTypeForPath(ctx context.Context, fs Filesystem, path string) (serializer.ContentType, error) ++} ++ ++// DefaultContentTyper describes the default connection between ++// file extensions and content types. ++var DefaultContentTyper ContentTyper = ContentTypeForExtension{ ++ ".json": serializer.ContentTypeJSON, ++ ".yaml": serializer.ContentTypeYAML, ++ ".yml": serializer.ContentTypeYAML, ++} ++ ++// ContentTypeForExtension implements the ContentTyper interface ++// by looking up the extension of the given path in ContentTypeForPath ++// matched against the key of the map. The extension in the map key ++// must start with a dot, e.g. ".json". The value of the map contains ++// the corresponding content type. There might be many extensions which ++// map to the same content type, e.g. both ".yaml" -> ContentTypeYAML ++// and ".yml" -> ContentTypeYAML. 
++type ContentTypeForExtension map[string]serializer.ContentType
++
++func (m ContentTypeForExtension) ContentTypeForPath(ctx context.Context, _ Filesystem, path string) (serializer.ContentType, error) {
++ ct, ok := m[filepath.Ext(path)]
++ if !ok {
++ return serializer.ContentType(""), fmt.Errorf("%w for file %q", ErrCannotDetermineContentType, path)
++ }
++ return ct, nil
++}
++
++// StaticContentTyper always responds with the same, statically-set, ContentType for any path.
++type StaticContentTyper struct {
++ // ContentType is a required field
++ ContentType serializer.ContentType
++}
++
++func (t StaticContentTyper) ContentTypeForPath(_ context.Context, _ Filesystem, _ string) (serializer.ContentType, error) {
++ if len(t.ContentType) == 0 {
++ return "", errors.New("StaticContentTyper.ContentType must not be empty")
++ }
++ return t.ContentType, nil
++}
++
++// FileExtensionResolver knows how to resolve what file extension to use for
++// a given ContentType.
++type FileExtensionResolver interface {
++ // ExtensionForContentType returns the file extension for the given ContentType.
++ // The returned string MUST start with a dot, e.g. ".json". If the given
++ // ContentType is not known, it is recommended to return a wrapped
++ // ErrUnrecognizedContentType.
++ ExtensionForContentType(ct serializer.ContentType) (string, error)
++}
++
++// DefaultFileExtensionResolver describes a default connection between
++// the file extensions and ContentTypes, namely JSON -> ".json" and
++// YAML -> ".yaml".
++var DefaultFileExtensionResolver FileExtensionResolver = ExtensionForContentType{
++ serializer.ContentTypeJSON: ".json",
++ serializer.ContentTypeYAML: ".yaml",
++}
++
++// ExtensionForContentType is a simple map implementation of FileExtensionResolver.
++type ExtensionForContentType map[serializer.ContentType]string
++
++func (m ExtensionForContentType) ExtensionForContentType(ct serializer.ContentType) (string, error) {
++ ext, ok := m[ct]
++ if !ok {
++ return "", fmt.Errorf("%w: %q", ErrUnrecognizedContentType, ct)
++ }
++ return ext, nil
++}
+diff --git a/pkg/storage/filesystem/interfaces.go b/pkg/storage/filesystem/interfaces.go
+new file mode 100644
+index 0000000..2626680
+--- /dev/null
++++ b/pkg/storage/filesystem/interfaces.go
+@@ -0,0 +1,49 @@
++package filesystem
++
++import (
++ "context"
++
++ "github.com/weaveworks/libgitops/pkg/storage"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++)
++
++// Storage (in this filesystem package) extends storage.Storage by specializing it to operate in a
++// filesystem context, and in other words use a FileFinder to locate the
++// files to operate on.
++type Storage interface {
++ storage.Storage
++
++ // FileFinder returns the underlying FileFinder used.
++ // TODO: Maybe one Storage can have multiple FileFinders?
++ FileFinder() FileFinder
++}
++
++// FileFinder is a generic implementation for locating files on disk, to be
++// used by a Storage.
++//
++// Important: The caller MUST guarantee that the implementation can figure
++// out if the GroupKind is namespaced or not by the following check:
++//
++// namespaced := id.ObjectKey().Namespace != ""
++//
++// In other words, the caller must enforce a namespace being set for namespaced
++// kinds, and namespace not being set for non-namespaced kinds.
++type FileFinder interface {
++ // Filesystem gets the underlying filesystem abstraction, if
++ // applicable.
++ Filesystem() Filesystem
++
++ // ContentTyper gets the underlying ContentTyper used. The ContentTyper
++ // must always return a result even if the underlying given path doesn't
++ // exist.
++ ContentTyper() ContentTyper
++
++ // ObjectPath gets the file path relative to the root directory.
++ // In order to support a create operation, this function must also return a valid path for
++ // files that do not yet exist on disk.
++ ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error)
++ // ObjectAt retrieves the ID based on the given relative file path to fs.
++ ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error)
++ // The FileFinder should be able to list namespaces and Object IDs.
++ storage.Lister
++}
+diff --git a/pkg/storage/filesystem/path_excluder.go b/pkg/storage/filesystem/path_excluder.go
+new file mode 100644
+index 0000000..58e8d2a
+--- /dev/null
++++ b/pkg/storage/filesystem/path_excluder.go
+@@ -0,0 +1,92 @@
++package filesystem
++
++import (
++ "os"
++ "path/filepath"
++ "strings"
++
++ "k8s.io/apimachinery/pkg/util/sets"
++)
++
++// PathExcluder is an interface that lets the user implement custom policies
++// for whether a given relative path to a given directory (fs is scoped at
++// that directory) should be considered for an operation (e.g. inotify watch
++// or file search).
++type PathExcluder interface {
++ // ShouldExcludePath takes in a relative path to the file which maybe
++ // should be excluded.
++ ShouldExcludePath(path string) bool
++}
++
++// DefaultPathExcluders returns a composition of
++// ExcludeDirectoryNames{} for ".git" dirs and ExcludeExtensions{} for the ".swp" file extensions.
++func DefaultPathExcluders() PathExcluder {
++ return MultiPathExcluder{
++ PathExcluders: []PathExcluder{
++ ExcludeDirectoryNames{
++ DirectoryNamesToExclude: []string{".git"},
++ },
++ ExcludeExtensions{
++ Extensions: []string{".swp"}, // vim creates temporary .swp swap files
++ },
++ },
++ }
++}
++
++// ExcludeDirectoryNames implements PathExcluder.
++var _ PathExcluder = ExcludeDirectoryNames{}
++
++// ExcludeDirectoryNames is a sample implementation of PathExcluder, that excludes
++// files that have any parent directories with the given names.
++type ExcludeDirectoryNames struct { ++ DirectoryNamesToExclude []string ++} ++ ++func (e ExcludeDirectoryNames) ShouldExcludePath(path string) bool { ++ parts := strings.Split(filepath.Clean(path), string(os.PathSeparator)) ++ return sets.NewString(parts[:len(parts)-1]...).HasAny(e.DirectoryNamesToExclude...) ++} ++ ++// ExcludeExtensions implements PathExcluder. ++var _ PathExcluder = ExcludeExtensions{} ++ ++// ExcludeExtensions is a sample implementation of PathExcluder, that excludes ++// all files with the given extensions. The strings in the Extensions slice ++// must be in the form of filepath.Ext, i.e. ".json", ".txt", and so forth. ++// The zero value of ExcludeExtensions excludes no files. ++type ExcludeExtensions struct { ++ Extensions []string ++} ++ ++func (e ExcludeExtensions) ShouldExcludePath(path string) bool { ++ ext := filepath.Ext(path) ++ for _, exclExt := range e.Extensions { ++ if ext == exclExt { ++ return true ++ } ++ } ++ return false ++} ++ ++// MultiPathExcluder implements PathExcluder. ++var _ PathExcluder = &MultiPathExcluder{} ++ ++// MultiPathExcluder is a composite PathExcluder that runs all of the ++// PathExcluders in the slice one-by-one, and returns true if any of them ++// does. The zero value of MultiPathExcluder excludes no files. 
++type MultiPathExcluder struct { ++ PathExcluders []PathExcluder ++} ++ ++func (m MultiPathExcluder) ShouldExcludePath(path string) bool { ++ // Loop through all the excluders, and return true if any of them does ++ for _, excl := range m.PathExcluders { ++ if excl == nil { ++ continue ++ } ++ if excl.ShouldExcludePath(path) { ++ return true ++ } ++ } ++ return false ++} +diff --git a/pkg/storage/filesystem/path_excluder_test.go b/pkg/storage/filesystem/path_excluder_test.go +new file mode 100644 +index 0000000..5995fd2 +--- /dev/null ++++ b/pkg/storage/filesystem/path_excluder_test.go +@@ -0,0 +1,77 @@ ++package filesystem ++ ++import ( ++ "testing" ++) ++ ++func TestExcludeGitDirectory_ShouldExcludePath(t *testing.T) { ++ tests := []struct { ++ name string ++ path string ++ want bool ++ }{ ++ { ++ name: "normal", ++ path: ".git/foo", ++ want: true, ++ }, ++ { ++ name: "with relative path", ++ path: "./.git/bar/baz", ++ want: true, ++ }, ++ { ++ name: "with many parents", ++ path: "/foo/bar/.git/hello", ++ want: true, ++ }, ++ { ++ name: "with many children", ++ path: ".git/foo/bar/baz", ++ want: true, ++ }, ++ { ++ name: "with parents and children", ++ path: "./foo/bar/.git/baz/bar", ++ want: true, ++ }, ++ { ++ name: "empty", ++ path: "", ++ want: false, ++ }, ++ { ++ name: "local dir", ++ path: ".", ++ want: false, ++ }, ++ { ++ name: "other prefix", ++ path: "foo.git", ++ want: false, ++ }, ++ { ++ name: "other suffix", ++ path: ".gitea", ++ want: false, ++ }, ++ { ++ name: "absolute path without git", ++ path: "/foo/bar/no/git/here", ++ want: false, ++ }, ++ { ++ name: "don't catch files named .git", ++ path: "/hello/.git", ++ want: false, ++ }, ++ } ++ e := ExcludeDirectoryNames{DirectoryNamesToExclude: []string{".git"}} ++ for _, tt := range tests { ++ t.Run(tt.name, func(t *testing.T) { ++ if got := e.ShouldExcludePath(tt.path); got != tt.want { ++ t.Errorf("ExcludeGitDirectory.ShouldExcludePath() = %v, want %v", got, tt.want) ++ } ++ }) ++ } ++} +diff 
--git a/pkg/storage/filesystem/storage.go b/pkg/storage/filesystem/storage.go +new file mode 100644 +index 0000000..f3bc287 +--- /dev/null ++++ b/pkg/storage/filesystem/storage.go +@@ -0,0 +1,170 @@ ++package filesystem ++ ++import ( ++ "context" ++ "fmt" ++ "os" ++ "path/filepath" ++ ++ "github.com/weaveworks/libgitops/pkg/serializer" ++ "github.com/weaveworks/libgitops/pkg/storage" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ "k8s.io/apimachinery/pkg/util/sets" ++) ++ ++// NewGeneric creates a new Generic using the given lower-level ++// FileFinder and Namespacer. ++func NewGeneric(fileFinder FileFinder, namespacer core.Namespacer) (Storage, error) { ++ if fileFinder == nil { ++ return nil, fmt.Errorf("NewGeneric: fileFinder is mandatory") ++ } ++ if namespacer == nil { ++ return nil, fmt.Errorf("NewGeneric: namespacer is mandatory") ++ } ++ ++ return &Generic{ ++ fileFinder: fileFinder, ++ namespacer: namespacer, ++ }, nil ++} ++ ++// Generic is a Storage-compliant implementation, that ++// combines the given lower-level FileFinder, Namespacer and Filesystem interfaces ++// in a generic manner. 
++type Generic struct { ++ fileFinder FileFinder ++ namespacer core.Namespacer ++} ++ ++func (r *Generic) Namespacer() core.Namespacer { ++ return r.namespacer ++} ++ ++func (r *Generic) FileFinder() FileFinder { ++ return r.fileFinder ++} ++ ++func (r *Generic) Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) { ++ // Get the path and verify namespacing info ++ p, err := r.getPath(ctx, id) ++ if err != nil { ++ return nil, err ++ } ++ // Check if the resource indicated by key exists ++ if !r.exists(ctx, p) { ++ return nil, core.NewErrNotFound(id) ++ } ++ // Read the file ++ return r.FileFinder().Filesystem().ReadFile(ctx, p) ++} ++ ++func (r *Generic) Exists(ctx context.Context, id core.UnversionedObjectID) bool { ++ // Get the path and verify namespacing info ++ p, err := r.getPath(ctx, id) ++ if err != nil { ++ return false ++ } ++ return r.exists(ctx, p) ++} ++ ++func (r *Generic) exists(ctx context.Context, path string) bool { ++ exists, _ := r.FileFinder().Filesystem().Exists(ctx, path) ++ return exists ++} ++ ++func (r *Generic) Checksum(ctx context.Context, id core.UnversionedObjectID) (string, error) { ++ // Get the path and verify namespacing info ++ p, err := r.getPath(ctx, id) ++ if err != nil { ++ return "", err ++ } ++ // Return a "high level" error if the file does not exist ++ checksum, err := r.FileFinder().Filesystem().Checksum(ctx, p) ++ if os.IsNotExist(err) { ++ return "", core.NewErrNotFound(id) ++ } else if err != nil { ++ return "", err ++ } ++ return checksum, nil ++} ++ ++func (r *Generic) ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) { ++ // Get the path and verify namespacing info ++ p, err := r.getPath(ctx, id) ++ if err != nil { ++ return "", err ++ } ++ return r.FileFinder().ContentTyper().ContentTypeForPath(ctx, r.fileFinder.Filesystem(), p) ++} ++ ++func (r *Generic) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error { ++ // Get the path 
and verify namespacing info ++ p, err := r.getPath(ctx, id) ++ if err != nil { ++ return err ++ } ++ ++ // Create the underlying directories if they do not exist already ++ if !r.exists(ctx, p) { ++ if err := r.FileFinder().Filesystem().MkdirAll(ctx, filepath.Dir(p), 0755); err != nil { ++ return err ++ } ++ } ++ // Write the file content ++ return r.FileFinder().Filesystem().WriteFile(ctx, p, content, 0664) ++} ++ ++func (r *Generic) Delete(ctx context.Context, id core.UnversionedObjectID) error { ++ // Get the path and verify namespacing info ++ p, err := r.getPath(ctx, id) ++ if err != nil { ++ return err ++ } ++ ++ // Check if the resource indicated by key exists ++ if !r.exists(ctx, p) { ++ return core.NewErrNotFound(id) ++ } ++ // Remove the file ++ return r.FileFinder().Filesystem().Remove(ctx, p) ++} ++ ++// ListNamespaces lists the available namespaces for the given GroupKind. ++// This function shall only be called for namespaced objects, it is up to ++// the caller to make sure they do not call this method for root-spaced ++// objects; for that the behavior is undefined (but returning an error ++// is recommended). ++func (r *Generic) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { ++ namespaced, err := r.namespacer.IsNamespaced(gk) ++ if err != nil { ++ return nil, err ++ } ++ // Validate the groupkind ++ if !namespaced { ++ return nil, fmt.Errorf("%w: cannot list namespaces for non-namespaced kind: %v", storage.ErrNamespacedMismatch, gk) ++ } ++ // Just use the underlying filefinder ++ return r.FileFinder().ListNamespaces(ctx, gk) ++} ++ ++// ListObjectIDs returns a list of unversioned ObjectIDs. ++// For namespaced GroupKinds, the caller must provide a namespace, and for ++// root-spaced GroupKinds, the caller must not. When namespaced, this function ++// must only return object IDs for that given namespace. 
++func (r *Generic) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) {
++ // Validate the namespace parameter
++ if err := storage.VerifyNamespaced(r.Namespacer(), gk, namespace); err != nil {
++ return nil, err
++ }
++ // Just use the underlying filefinder
++ return r.FileFinder().ListObjectIDs(ctx, gk, namespace)
++}
++
++func (r *Generic) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) {
++ // Verify namespacing info
++ if err := storage.VerifyNamespaced(r.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace); err != nil {
++ return "", err
++ }
++ // Get the path
++ return r.FileFinder().ObjectPath(ctx, id)
++}
+diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go
+new file mode 100644
+index 0000000..0d674b5
+--- /dev/null
++++ b/pkg/storage/filesystem/unstructured/event/storage.go
+@@ -0,0 +1,352 @@
++package unstructuredevent
++
++import (
++ "context"
++ "fmt"
++ gosync "sync"
++
++ "github.com/sirupsen/logrus"
++ "github.com/weaveworks/libgitops/pkg/storage"
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "github.com/weaveworks/libgitops/pkg/storage/event"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents/inotify"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured"
++ "github.com/weaveworks/libgitops/pkg/util/sync"
++)
++
++// UnstructuredEventStorage is an extension of raw.UnstructuredStorage, that
++// adds the possibility to listen for object updates from a FileEventsEmitter.
++//
++// When the Sync() function is run, the ObjectEvents that are emitted to the
++// listening channels will have ObjectEvent.Type == ObjectEventSync.
++type UnstructuredEventStorage interface { ++ unstructured.Storage ++ fileevents.StorageCommon ++} ++ ++const defaultEventsBufferSize = 4096 ++ ++// NewManifest is a high-level constructor for a generic ++// MappedFileFinder and filesystem.Storage, together with a ++// inotify FileWatcher; all combined into an UnstructuredEventStorage. ++func NewManifest( ++ dir string, ++ contentTyper filesystem.ContentTyper, ++ namespacer core.Namespacer, ++ recognizer core.ObjectRecognizer, ++ pathExcluder filesystem.PathExcluder, ++) (UnstructuredEventStorage, error) { ++ fs := filesystem.NewOSFilesystem(dir) ++ fileFinder := unstructured.NewGenericMappedFileFinder(contentTyper, fs) ++ fsRaw, err := filesystem.NewGeneric(fileFinder, namespacer) ++ if err != nil { ++ return nil, err ++ } ++ emitter, err := inotify.NewFileWatcher(dir, &inotify.FileWatcherOptions{ ++ PathExcluder: pathExcluder, ++ }) ++ if err != nil { ++ return nil, err ++ } ++ unstructuredRaw, err := unstructured.NewGeneric(fsRaw, recognizer, pathExcluder) ++ if err != nil { ++ return nil, err ++ } ++ return NewGeneric(unstructuredRaw, emitter, GenericStorageOptions{ ++ SyncAtStart: true, ++ EmitSyncEvent: true, ++ }) ++} ++ ++// NewGeneric is an extended Storage implementation, which ++// together with the provided ObjectRecognizer and FileEventsEmitter listens for ++// file events, keeps the mappings of the filesystem.Storage's MappedFileFinder ++// in sync (s must use the mapped variant), and sends high-level ObjectEvents ++// upstream. ++// ++// Note: This WatchStorage only works for one-frame files (i.e. only one YAML document ++// per file is supported). 
++func NewGeneric( ++ s unstructured.Storage, ++ emitter fileevents.Emitter, ++ opts GenericStorageOptions, ++) (UnstructuredEventStorage, error) { ++ return &Generic{ ++ Storage: s, ++ emitter: emitter, ++ ++ inbound: make(fileevents.FileEventStream, defaultEventsBufferSize), ++ // outbound set by WatchForObjectEvents ++ outboundMu: &gosync.Mutex{}, ++ ++ // monitor set by WatchForObjectEvents, guarded by outboundMu ++ ++ opts: opts, ++ }, nil ++} ++ ++type GenericStorageOptions struct { ++ // When Sync(ctx) is run, emit a "SYNC" event to the listening channel ++ // Default: false ++ EmitSyncEvent bool ++ // Do a full re-sync at startup of the watcher ++ // Default: true ++ SyncAtStart bool ++} ++ ++// Generic implements UnstructuredEventStorage. ++var _ UnstructuredEventStorage = &Generic{} ++ ++// Generic is an extended raw.Storage implementation, which provides a watcher ++// for watching changes in the directory managed by the embedded Storage's RawStorage. ++// If the RawStorage is a MappedRawStorage instance, it's mappings will automatically ++// be updated by the WatchStorage. Update events are sent to the given event stream. ++// Note: This WatchStorage only works for one-frame files (i.e. only one YAML document ++// per file is supported). ++// TODO: Update description ++type Generic struct { ++ unstructured.Storage ++ // the filesystem events emitter ++ emitter fileevents.Emitter ++ ++ // channels ++ inbound fileevents.FileEventStream ++ outbound event.ObjectEventStream ++ outboundMu *gosync.Mutex ++ ++ // goroutine ++ monitor *sync.Monitor ++ ++ // opts ++ opts GenericStorageOptions ++} ++ ++func (s *Generic) FileEventsEmitter() fileevents.Emitter { ++ return s.emitter ++} ++ ++func (s *Generic) WatchForObjectEvents(ctx context.Context, into event.ObjectEventStream) error { ++ s.outboundMu.Lock() ++ defer s.outboundMu.Unlock() ++ // We don't support more than one listener ++ // TODO: maybe support many listeners in the future? 
++ if s.outbound != nil { ++ return fmt.Errorf("WatchStorage: not more than one watch supported: %w", fileevents.ErrTooManyWatches) ++ } ++ // Hook up our inbound channel to the emitter, to make the pipeline functional ++ if err := s.emitter.WatchForFileEvents(ctx, s.inbound); err != nil { ++ return err ++ } ++ // Set outbound at this stage so Sync possibly can send events. ++ s.outbound = into ++ // Start the backing goroutines ++ s.monitor = sync.RunMonitor(s.monitorFunc) ++ ++ // Do a full sync in the beginning only if asked. Be aware that without running a Sync ++ // at all before events start happening, the reporting might not work as it should ++ if s.opts.SyncAtStart { ++ // Disregard the changed files at Sync. ++ if _, err := s.Sync(ctx); err != nil { ++ return err ++ } ++ } ++ return nil // all ok ++} ++ ++func (s *Generic) Sync(ctx context.Context) ([]unstructured.ChecksumPathID, error) { ++ // Sync the underlying UnstructuredStorage, and see what files had changed since last sync ++ changedObjects, err := s.Storage.Sync(ctx) ++ if err != nil { ++ return nil, err ++ } ++ ++ // Send special "sync" events for each of the changed objects, if configured ++ if s.opts.EmitSyncEvent { ++ for _, changedObject := range changedObjects { ++ // Send a special "sync" event for this ObjectID to the events channel ++ s.sendEvent(event.ObjectEventSync, changedObject.ID) ++ } ++ } ++ ++ return changedObjects, nil ++} ++ ++// Write writes the given content to the resource indicated by the ID. ++// Error returns are implementation-specific. ++func (s *Generic) Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error { ++ // Get the path and verify namespacing info ++ p, err := s.getPath(ctx, id) ++ if err != nil { ++ return err ++ } ++ // Suspend the write event ++ s.emitter.Suspend(ctx, p) ++ // Call the underlying filesystem.Storage ++ return s.Storage.Write(ctx, id, content) ++} ++ ++// Delete deletes the resource indicated by the ID. 
++// If the resource does not exist, it returns ErrNotFound. ++func (s *Generic) Delete(ctx context.Context, id core.UnversionedObjectID) error { ++ // Get the path and verify namespacing info ++ p, err := s.getPath(ctx, id) ++ if err != nil { ++ return err ++ } ++ // Suspend the write event ++ s.emitter.Suspend(ctx, p) ++ // Call the underlying filesystem.Storage ++ return s.Storage.Delete(ctx, id) ++} ++ ++func (s *Generic) getPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { ++ // Verify namespacing info ++ if err := storage.VerifyNamespaced(s.Namespacer(), id.GroupKind(), id.ObjectKey().Namespace); err != nil { ++ return "", err ++ } ++ // Get the path ++ return s.FileFinder().ObjectPath(ctx, id) ++} ++ ++func (s *Generic) Close() error { ++ err := s.emitter.Close() ++ // No need to check the error here ++ _ = s.monitor.Wait() ++ return err ++} ++ ++func (s *Generic) monitorFunc() error { ++ logrus.Debug("WatchStorage: Monitoring thread started") ++ defer logrus.Debug("WatchStorage: Monitoring thread stopped") ++ ++ ctx := context.Background() ++ ++ for { ++ // TODO: handle context cancellations, i.e. 
ctx.Done() ++ ev, ok := <-s.inbound ++ if !ok { ++ logrus.Error("WatchStorage: Fatal: Got non-ok response from watcher.GetFileEventStream()") ++ return nil ++ } ++ ++ logrus.Tracef("WatchStorage: Processing event: %s", ev.Type) ++ ++ // Skip the file if it has an invalid path ++ if !filesystem.IsValidFileInFilesystem( ++ ctx, ++ s.FileFinder().Filesystem(), ++ s.FileFinder().ContentTyper(), ++ s.PathExcluder(), ++ ev.Path) { ++ logrus.Tracef("WatchStorage: Skipping file %q as it is ignored by the ContentTyper/PathExcluder", ev.Path) ++ continue ++ } ++ ++ var err error ++ switch ev.Type { ++ // FileEventModify is also sent for newly-created files ++ case fileevents.FileEventModify, fileevents.FileEventMove: ++ err = s.handleModifyMove(ctx, ev) ++ case fileevents.FileEventDelete: ++ err = s.handleDelete(ctx, ev) ++ default: ++ err = fmt.Errorf("cannot handle update of type %v for path %q", ev.Type, ev.Path) ++ } ++ if err != nil { ++ logrus.Errorf("WatchStorage: %v", err) ++ } ++ } ++} ++ ++func (s *Generic) handleDelete(ctx context.Context, ev *fileevents.FileEvent) error { ++ // The object is deleted, so we need to do a reverse-lookup of what kind of object ++ // was there earlier, based on the path. This assumes that the filefinder organizes ++ // the known objects in such a way that it is able to do the reverse-lookup. For ++ // mapped FileFinders, by this point the path should still be in the local cache, ++ // which should make us able to get the ID before deleted from the cache. 
++ objectID, err := s.MappedFileFinder().ObjectAt(ctx, ev.Path) ++ if err != nil { ++ return fmt.Errorf("failed to reverse lookup ID for deleted file %q: %v", ev.Path, err) ++ } ++ ++ // Remove the mapping from the FileFinder cache for this ID as it's now deleted ++ s.deleteMapping(ctx, objectID) ++ // Send the delete event to the channel ++ s.sendEvent(event.ObjectEventDelete, objectID) ++ return nil ++} ++ ++func (s *Generic) handleModifyMove(ctx context.Context, ev *fileevents.FileEvent) error { ++ // Read the content of this modified, moved or created file ++ content, err := s.FileFinder().Filesystem().ReadFile(ctx, ev.Path) ++ if err != nil { ++ return fmt.Errorf("could not read %q: %v", ev.Path, err) ++ } ++ ++ // Try to recognize the object ++ versionedID, err := s.ObjectRecognizer().ResolveObjectID(ctx, ev.Path, content) ++ if err != nil { ++ return fmt.Errorf("did not recognize object at path %q: %v", ev.Path, err) ++ } ++ ++ // If the file was just moved around, just overwrite the earlier mapping ++ if ev.Type == fileevents.FileEventMove { ++ // This assumes that the file content does not change in the move ++ // operation. TODO: document this as a requirement for the Emitter. ++ s.setMapping(ctx, versionedID, ev.Path) ++ ++ // Internal move events are a no-op ++ return nil ++ } ++ ++ // Determine if this object already existed in the fileFinder's cache, ++ // in order to find out if the object was created or modified (default). ++ // TODO: In the future, maybe support multiple files pointing to the same ++ // ObjectID? Case in point here is e.g. a Modify event for a known path that ++ // changes the underlying ObjectID. 
++ objectEvent := event.ObjectEventUpdate
++ // Set the mapping if it didn't exist before; assume this is a Create event
++ if _, ok := s.MappedFileFinder().GetMapping(ctx, versionedID); !ok {
++ // This is what actually determines if an Object is created,
++ // so update the event to event.ObjectEventCreate here
++ objectEvent = event.ObjectEventCreate
++ }
++ // Update the mapping between this object and path (this updates
++ // the checksum underneath too).
++ s.setMapping(ctx, versionedID, ev.Path)
++ // Send the event to the channel
++ s.sendEvent(objectEvent, versionedID)
++ return nil
++}
++
++func (s *Generic) sendEvent(eventType event.ObjectEventType, id core.UnversionedObjectID) {
++ logrus.Tracef("Generic: Sending event: %v", eventType)
++ s.outbound <- &event.ObjectEvent{
++ ID: id,
++ Type: eventType,
++ }
++}
++
++// setMapping registers a mapping between the given object and the specified path in the
++// MappedFileFinder. If a given mapping already exists between this object and some path, it
++// will be overridden with the specified new path.
++func (s *Generic) setMapping(ctx context.Context, id core.UnversionedObjectID, path string) {
++ // Get the current checksum of the new file
++ checksum, err := s.MappedFileFinder().Filesystem().Checksum(ctx, path)
++ if err != nil {
++ logrus.Errorf("Unexpected error when getting checksum of file %q: %v", path, err)
++ return
++ }
++ // Register the current state in the cache
++ s.MappedFileFinder().SetMapping(ctx, id, unstructured.ChecksumPath{
++ Path: path,
++ Checksum: checksum,
++ })
++}
++
++// deleteMapping removes the mapping for a file that no longer exists.
++func (s *Generic) deleteMapping(ctx context.Context, id core.UnversionedObjectID) {
++ s.MappedFileFinder().DeleteMapping(ctx, id)
++}
+diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go
+new file mode 100644
+index 0000000..274da22
+--- /dev/null
++++ 
b/pkg/storage/filesystem/unstructured/filefinder_mapped.go
+@@ -0,0 +1,157 @@
++package unstructured
++
++import (
++ "context"
++ "errors"
++
++ "github.com/weaveworks/libgitops/pkg/storage/core"
++ "github.com/weaveworks/libgitops/pkg/storage/filesystem"
++ utilerrs "k8s.io/apimachinery/pkg/util/errors"
++ "k8s.io/apimachinery/pkg/util/sets"
++)
++
++var (
++ // ErrNotTracked is returned when the requested resource wasn't found.
++ ErrNotTracked = errors.New("untracked object")
++)
++
++// GenericMappedFileFinder implements MappedFileFinder.
++var _ MappedFileFinder = &GenericMappedFileFinder{}
++
++// NewGenericMappedFileFinder creates a new instance of GenericMappedFileFinder,
++// that implements the MappedFileFinder interface. The contentTyper is optional,
++// by default filesystem.DefaultContentTyper will be used.
++func NewGenericMappedFileFinder(contentTyper filesystem.ContentTyper, fs filesystem.Filesystem) MappedFileFinder {
++ if contentTyper == nil {
++ contentTyper = filesystem.DefaultContentTyper
++ }
++ if fs == nil {
++ panic("NewGenericMappedFileFinder: fs is mandatory")
++ }
++ return &GenericMappedFileFinder{
++ contentTyper: contentTyper,
++ // TODO: Support multiple branches
++ branch: &branchImpl{},
++ fs: fs,
++ }
++}
++
++// GenericMappedFileFinder is a generic implementation of MappedFileFinder.
++// It uses a ContentTyper to identify what content type a file uses.
++//
++// This implementation relies on that all information about what files exist
++// is fed through SetMapping(s). If a file or ID is requested that doesn't
++// exist in the internal cache, ErrNotTracked will be returned.
++//
++// Hence, this implementation does not at the moment support creating net-new
++// Objects without someone calling SetMapping() first.
++type GenericMappedFileFinder struct { ++ // Default: DefaultContentTyper ++ contentTyper filesystem.ContentTyper ++ fs filesystem.Filesystem ++ ++ branch branch ++} ++ ++func (f *GenericMappedFileFinder) Filesystem() filesystem.Filesystem { ++ return f.fs ++} ++ ++func (f *GenericMappedFileFinder) ContentTyper() filesystem.ContentTyper { ++ return f.contentTyper ++} ++ ++// ObjectPath gets the file path relative to the root directory ++func (f *GenericMappedFileFinder) ObjectPath(ctx context.Context, id core.UnversionedObjectID) (string, error) { ++ cp, ok := f.GetMapping(ctx, id) ++ if !ok { ++ // TODO: separate interface for "new creates"? ++ return "", utilerrs.NewAggregate([]error{ErrNotTracked, core.NewErrNotFound(id)}) ++ } ++ return cp.Path, nil ++} ++ ++// ObjectAt retrieves the ID containing the virtual path based ++// on the given physical file path. ++func (f *GenericMappedFileFinder) ObjectAt(ctx context.Context, path string) (core.UnversionedObjectID, error) { ++ // TODO: Add reverse tracking too? ++ for gk, gkIter := range f.branch.raw() { ++ for ns, nsIter := range gkIter.raw() { ++ for name, cp := range nsIter.raw() { ++ if cp.Path == path { ++ return core.NewUnversionedObjectID(gk, core.ObjectKey{Name: name, Namespace: ns}), nil ++ } ++ } ++ } ++ } ++ // TODO: Support "creation" of Objects easier, in a generic way through an interface, e.g. ++ // NewObjectPlacer? ++ return nil, ErrNotTracked ++} ++ ++// ListNamespaces lists the available namespaces for the given GroupKind. ++// This function shall only be called for namespaced objects, it is up to ++// the caller to make sure they do not call this method for root-spaced ++// objects. If any of the given rules are violated, ErrNamespacedMismatch ++// should be returned as a wrapped error. ++// ++// The implementer can choose between basing the answer strictly on e.g. 
++// v1.Namespace objects that exist in the system, or just the set of ++// different namespaces that have been set on any object belonging to ++// the given GroupKind. ++func (f *GenericMappedFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { ++ m := f.branch.groupKind(gk).raw() ++ nsSet := sets.NewString() ++ for ns := range m { ++ nsSet.Insert(ns) ++ } ++ return nsSet, nil ++} ++ ++// ListObjectIDs returns a list of unversioned ObjectIDs. ++// For namespaced GroupKinds, the caller must provide a namespace, and for ++// root-spaced GroupKinds, the caller must not. When namespaced, this function ++// must only return object IDs for that given namespace. If any of the given ++// rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. ++func (f *GenericMappedFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) { ++ m := f.branch.groupKind(gk).namespace(namespace).raw() ++ ids := make([]core.UnversionedObjectID, 0, len(m)) ++ for name := range m { ++ ids = append(ids, core.NewUnversionedObjectID(gk, core.ObjectKey{Name: name, Namespace: namespace})) ++ } ++ return ids, nil ++} ++ ++// GetMapping retrieves a mapping in the system ++func (f *GenericMappedFileFinder) GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) { ++ cp, ok := f.branch. ++ groupKind(id.GroupKind()). ++ namespace(id.ObjectKey().Namespace). ++ name(id.ObjectKey().Name) ++ return cp, ok ++} ++ ++// SetMapping binds an ID's virtual path to a physical file path ++func (f *GenericMappedFileFinder) SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) { ++ f.branch. ++ groupKind(id.GroupKind()). ++ namespace(id.ObjectKey().Namespace). 
++ setName(id.ObjectKey().Name, checksumPath) ++} ++ ++// ResetMappings replaces all mappings at once ++func (f *GenericMappedFileFinder) ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) { ++ f.branch = &branchImpl{} ++ for id, cp := range m { ++ f.SetMapping(ctx, id, cp) ++ } ++} ++ ++// DeleteMapping removes the physical file path mapping ++// matching the given id ++func (f *GenericMappedFileFinder) DeleteMapping(ctx context.Context, id core.UnversionedObjectID) { ++ f.branch. ++ groupKind(id.GroupKind()). ++ namespace(id.ObjectKey().Namespace). ++ deleteName(id.ObjectKey().Name) ++} +diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go +new file mode 100644 +index 0000000..814b437 +--- /dev/null ++++ b/pkg/storage/filesystem/unstructured/interfaces.go +@@ -0,0 +1,75 @@ ++package unstructured ++ ++import ( ++ "context" ++ ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ "github.com/weaveworks/libgitops/pkg/storage/filesystem" ++) ++ ++// Storage is a raw Storage interface that builds on top ++// of Storage. It uses an ObjectRecognizer to recognize ++// otherwise unknown objects in unstructured files. ++// The Storage must use a MappedFileFinder underneath. ++// ++// Multiple Objects in the same file, or multiple Objects with the ++// same ID in multiple files are not supported. ++type Storage interface { ++ filesystem.Storage ++ ++ // Sync synchronizes the current state of the filesystem with the ++ // cached mappings in the MappedFileFinder. ++ Sync(ctx context.Context) ([]ChecksumPathID, error) ++ ++ // ObjectRecognizer returns the underlying ObjectRecognizer used. ++ ObjectRecognizer() core.ObjectRecognizer ++ // PathExcluder specifies what paths to not sync ++ PathExcluder() filesystem.PathExcluder ++ // MappedFileFinder returns the underlying MappedFileFinder used. 
++ MappedFileFinder() MappedFileFinder ++} ++ ++// MappedFileFinder is an extension to FileFinder that allows it to have an internal ++// cache with mappings between UnversionedObjectID and a ChecksumPath. This allows ++// higher-order interfaces to manage Objects in files in an unorganized directory ++// (e.g. a Git repo). ++// ++// Multiple Objects in the same file, or multiple Objects with the ++// same ID in multiple files are not supported. ++type MappedFileFinder interface { ++ filesystem.FileFinder ++ ++ // GetMapping retrieves a mapping in the system. ++ GetMapping(ctx context.Context, id core.UnversionedObjectID) (ChecksumPath, bool) ++ // SetMapping binds an ID to a physical file path. This operation overwrites ++ // any previous mapping for id. ++ SetMapping(ctx context.Context, id core.UnversionedObjectID, checksumPath ChecksumPath) ++ // ResetMappings replaces all mappings at once to the ones in m. ++ ResetMappings(ctx context.Context, m map[core.UnversionedObjectID]ChecksumPath) ++ // DeleteMapping removes the mapping for the given id. ++ DeleteMapping(ctx context.Context, id core.UnversionedObjectID) ++} ++ ++// ChecksumPath is a tuple of a given Checksum and relative file Path, ++// for use in MappedFileFinder. ++type ChecksumPath struct { ++ // Checksum is the checksum of the file at the given path. ++ // ++ // What the checksum is is application-dependent, however, it ++ // should be the same for two invocations, as long as the stored ++ // data is the same. It might change over time although the ++ // underlying data did not. Examples of checksums that can be ++ // used is: the file modification timestamp, a sha256sum of the ++ // file content, or the latest Git commit when the file was ++ // changed. ++ // ++ // The checksum is calculated by the filesystem.Filesystem. ++ Checksum string ++ // Path to the file, relative to filesystem.Filesystem.RootDirectory(). 
++ Path string ++} ++ ++type ChecksumPathID struct { ++ ChecksumPath ++ ID core.ObjectID ++} +diff --git a/pkg/storage/filesystem/unstructured/mapped_cache.go b/pkg/storage/filesystem/unstructured/mapped_cache.go +new file mode 100644 +index 0000000..08aeb83 +--- /dev/null ++++ b/pkg/storage/filesystem/unstructured/mapped_cache.go +@@ -0,0 +1,104 @@ ++package unstructured ++ ++import "github.com/weaveworks/libgitops/pkg/storage/core" ++ ++// This file contains a set of private interfaces and implementations ++// that allows caching mappings between a core.UnversionedObjectID ++// and a ChecksumPath. ++ ++// TODO: rename this interface ++type branch interface { ++ groupKind(core.GroupKind) groupKind ++ raw() map[core.GroupKind]groupKind ++} ++ ++type groupKind interface { ++ namespace(string) namespace ++ raw() map[string]namespace ++} ++ ++type namespace interface { ++ name(string) (ChecksumPath, bool) ++ setName(string, ChecksumPath) ++ deleteName(string) ++ raw() map[string]ChecksumPath ++} ++ ++type branchImpl struct { ++ m map[core.GroupKind]groupKind ++} ++ ++func (b *branchImpl) groupKind(gk core.GroupKind) groupKind { ++ if b.m == nil { ++ b.m = make(map[core.GroupKind]groupKind) ++ } ++ val, ok := b.m[gk] ++ if !ok { ++ val = &groupKindImpl{} ++ b.m[gk] = val ++ } ++ return val ++} ++ ++func (b *branchImpl) raw() map[core.GroupKind]groupKind { ++ if b.m == nil { ++ b.m = make(map[core.GroupKind]groupKind) ++ } ++ return b.m ++} ++ ++type groupKindImpl struct { ++ m map[string]namespace ++} ++ ++func (g *groupKindImpl) namespace(ns string) namespace { ++ if g.m == nil { ++ g.m = make(map[string]namespace) ++ } ++ val, ok := g.m[ns] ++ if !ok { ++ val = &namespaceImpl{} ++ g.m[ns] = val ++ } ++ return val ++} ++ ++func (g *groupKindImpl) raw() map[string]namespace { ++ if g.m == nil { ++ g.m = make(map[string]namespace) ++ } ++ return g.m ++} ++ ++type namespaceImpl struct { ++ m map[string]ChecksumPath ++} ++ ++func (n *namespaceImpl) name(name string) 
(ChecksumPath, bool) { ++ if n.m == nil { ++ n.m = make(map[string]ChecksumPath) ++ } ++ cp, ok := n.m[name] ++ return cp, ok ++} ++ ++func (n *namespaceImpl) setName(name string, cp ChecksumPath) { ++ if n.m == nil { ++ n.m = make(map[string]ChecksumPath) ++ } ++ n.m[name] = cp ++} ++ ++func (n *namespaceImpl) deleteName(name string) { ++ if n.m == nil { ++ n.m = make(map[string]ChecksumPath) ++ } ++ delete(n.m, name) ++} ++ ++func (n *namespaceImpl) raw() map[string]ChecksumPath { ++ if n.m == nil { ++ n.m = make(map[string]ChecksumPath) ++ } ++ return n.m ++} +diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go +new file mode 100644 +index 0000000..9109734 +--- /dev/null ++++ b/pkg/storage/filesystem/unstructured/storage.go +@@ -0,0 +1,120 @@ ++package unstructured ++ ++import ( ++ "context" ++ "errors" ++ "fmt" ++ ++ "github.com/sirupsen/logrus" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ "github.com/weaveworks/libgitops/pkg/storage/filesystem" ++) ++ ++func NewGeneric(storage filesystem.Storage, recognizer core.ObjectRecognizer, pathExcluder filesystem.PathExcluder) (Storage, error) { ++ if storage == nil { ++ return nil, fmt.Errorf("storage is mandatory") ++ } ++ if recognizer == nil { ++ return nil, fmt.Errorf("recognizer is mandatory") ++ } ++ mappedFileFinder, ok := storage.FileFinder().(MappedFileFinder) ++ if !ok { ++ return nil, errors.New("the given filesystem.Storage must use a MappedFileFinder") ++ } ++ return &Generic{ ++ Storage: storage, ++ recognizer: recognizer, ++ mappedFileFinder: mappedFileFinder, ++ pathExcluder: pathExcluder, ++ }, nil ++} ++ ++type Generic struct { ++ filesystem.Storage ++ recognizer core.ObjectRecognizer ++ mappedFileFinder MappedFileFinder ++ pathExcluder filesystem.PathExcluder ++} ++ ++// Sync synchronizes the current state of the filesystem with the ++// cached mappings in the MappedFileFinder. 
++func (s *Generic) Sync(ctx context.Context) ([]ChecksumPathID, error) { ++ fileFinder := s.MappedFileFinder() ++ ++ // List all valid files in the fs ++ files, err := filesystem.ListValidFilesInFilesystem( ++ ctx, ++ fileFinder.Filesystem(), ++ fileFinder.ContentTyper(), ++ s.PathExcluder(), ++ ) ++ if err != nil { ++ return nil, err ++ } ++ ++ // Send SYNC events for all files (and fill the mappings ++ // of the MappedFileFinder) before starting to monitor changes ++ updatedFiles := make([]ChecksumPathID, 0, len(files)) ++ for _, filePath := range files { ++ // Get the current checksum of the file ++ currentChecksum, err := fileFinder.Filesystem().Checksum(ctx, filePath) ++ if err != nil { ++ logrus.Errorf("Could not get checksum for file %q: %v", filePath, err) ++ continue ++ } ++ ++ // If the given file already is tracked; i.e. has a mapping with a ++ // non-empty checksum, and the current checksum matches, we do not ++ // need to do anything. ++ if id, err := fileFinder.ObjectAt(ctx, filePath); err == nil { ++ if cp, ok := fileFinder.GetMapping(ctx, id); ok && len(cp.Checksum) != 0 { ++ if cp.Checksum == currentChecksum { ++ logrus.Tracef("Checksum for file %q is up-to-date: %q, skipping...", filePath, cp.Checksum) ++ continue ++ } ++ } ++ } ++ ++ // If the file is not known to the FileFinder yet, or if the checksum ++ // was empty, read the file, and recognize it. 
++ content, err := s.FileFinder().Filesystem().ReadFile(ctx, filePath) ++ if err != nil { ++ logrus.Warnf("Ignoring %q: %v", filePath, err) ++ continue ++ } ++ ++ id, err := s.recognizer.ResolveObjectID(ctx, filePath, content) ++ if err != nil { ++ logrus.Warnf("Could not recognize object ID in %q: %v", filePath, err) ++ continue ++ } ++ ++ // Add a mapping between this object and path ++ cp := ChecksumPath{ ++ Checksum: currentChecksum, ++ Path: filePath, ++ } ++ s.MappedFileFinder().SetMapping(ctx, id, cp) ++ // Add to the slice which we'll return ++ updatedFiles = append(updatedFiles, ChecksumPathID{ ++ ChecksumPath: cp, ++ ID: id, ++ }) ++ } ++ return updatedFiles, nil ++} ++ ++// ObjectRecognizer returns the underlying ObjectRecognizer used. ++func (s *Generic) ObjectRecognizer() core.ObjectRecognizer { ++ return s.recognizer ++} ++ ++// PathExcluder specifies what paths to not sync ++func (s *Generic) PathExcluder() filesystem.PathExcluder { ++ return s.pathExcluder ++} ++ ++// MappedFileFinder returns the underlying MappedFileFinder used. ++func (s *Generic) MappedFileFinder() MappedFileFinder { ++ return s.mappedFileFinder ++} +diff --git a/pkg/storage/format.go b/pkg/storage/format.go +deleted file mode 100644 +index 84993ce..0000000 +--- a/pkg/storage/format.go ++++ /dev/null +@@ -1,20 +0,0 @@ +-package storage +- +-import "github.com/weaveworks/libgitops/pkg/serializer" +- +-// ContentTypes describes the connection between +-// file extensions and a content types. 
+-var ContentTypes = map[string]serializer.ContentType{ +- ".json": serializer.ContentTypeJSON, +- ".yaml": serializer.ContentTypeYAML, +- ".yml": serializer.ContentTypeYAML, +-} +- +-func extForContentType(wanted serializer.ContentType) string { +- for ext, ct := range ContentTypes { +- if ct == wanted { +- return ext +- } +- } +- return "" +-} +diff --git a/pkg/storage/interfaces.go b/pkg/storage/interfaces.go +new file mode 100644 +index 0000000..c5698e0 +--- /dev/null ++++ b/pkg/storage/interfaces.go +@@ -0,0 +1,103 @@ ++package storage ++ ++import ( ++ "context" ++ "errors" ++ ++ "github.com/weaveworks/libgitops/pkg/serializer" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ "k8s.io/apimachinery/pkg/util/sets" ++) ++ ++var ( ++ // ErrNamespacedMismatch is returned by Storage methods if the given UnversionedObjectID ++ // carries invalid data, according to the Namespacer. ++ ErrNamespacedMismatch = errors.New("mismatch between namespacing info for object and the given parameter") ++) ++ ++// Storage is a Key-indexed low-level interface to ++// store byte-encoded Objects (resources) in non-volatile ++// memory. ++// ++// This Storage operates entirely on GroupKinds; without enforcing ++// a specific version of the encoded data format. This is possible ++// with the assumption that any older format stored at disk can be ++// read successfully and converted into a more recent version. ++// ++// TODO: Add thread-safety so it is not possible to issue a Write() or Delete() ++// at the same time as any other read operation. ++type Storage interface { ++ Reader ++ Writer ++} ++ ++// StorageCommon is an interface that contains the resources both needed ++// by Reader and Writer. ++type StorageCommon interface { ++ // Namespacer gives access to the namespacer that is used ++ Namespacer() core.Namespacer ++ // Exists checks if the resource indicated by the ID exists. 
++ Exists(ctx context.Context, id core.UnversionedObjectID) bool ++} ++ ++// Reader provides the read operations for the Storage. ++type Reader interface { ++ StorageCommon ++ ++ // Read returns a resource's content based on the ID. ++ // If the resource does not exist, it returns core.NewErrNotFound. ++ Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) ++ ++ // Checksum returns a checksum of the Object with the given ID. ++ // ++ // What the checksum is is application-dependent, however, it ++ // should be the same for two invocations, as long as the stored ++ // data is the same. It might change over time although the ++ // underlying data did not. Examples of checksums that can be ++ // used is: the file modification timestamp, a sha256sum of the ++ // file content, or the latest Git commit when the file was ++ // changed. ++ Checksum(ctx context.Context, id core.UnversionedObjectID) (string, error) ++ ++ // ContentType returns the content type that should be used when serializing ++ // the object with the given ID. This operation must function also before the ++ // Object with the given id exists in the system, in order to be able to ++ // create new Objects. ++ ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) ++ ++ // List operations ++ Lister ++} ++ ++type Lister interface { ++ // ListNamespaces lists the available namespaces for the given GroupKind. ++ // This function shall only be called for namespaced objects, it is up to ++ // the caller to make sure they do not call this method for root-spaced ++ // objects. If any of the given rules are violated, ErrNamespacedMismatch ++ // should be returned as a wrapped error. ++ // ++ // The implementer can choose between basing the answer strictly on e.g. ++ // v1.Namespace objects that exist in the system, or just the set of ++ // different namespaces that have been set on any object belonging to ++ // the given GroupKind. 
++ ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) ++ ++ // ListObjectIDs returns a list of unversioned ObjectIDs. ++ // For namespaced GroupKinds, the caller must provide a namespace, and for ++ // root-spaced GroupKinds, the caller must not. When namespaced, this function ++ // must only return object IDs for that given namespace. If any of the given ++ // rules are violated, ErrNamespacedMismatch should be returned as a wrapped error. ++ ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) ([]core.UnversionedObjectID, error) ++} ++ ++// Reader provides the write operations for the Storage. ++type Writer interface { ++ StorageCommon ++ ++ // Write writes the given content to the resource indicated by the ID. ++ // Error returns are implementation-specific. ++ Write(ctx context.Context, id core.UnversionedObjectID, content []byte) error ++ // Delete deletes the resource indicated by the ID. ++ // If the resource does not exist, it returns ErrNotFound. ++ Delete(ctx context.Context, id core.UnversionedObjectID) error ++} +diff --git a/pkg/storage/key.go b/pkg/storage/key.go +deleted file mode 100644 +index 015cac4..0000000 +--- a/pkg/storage/key.go ++++ /dev/null +@@ -1,64 +0,0 @@ +-package storage +- +-import ( +- "github.com/weaveworks/libgitops/pkg/runtime" +- "k8s.io/apimachinery/pkg/runtime/schema" +-) +- +-type kindKey schema.GroupVersionKind +- +-func (gvk kindKey) GetGroup() string { return gvk.Group } +-func (gvk kindKey) GetVersion() string { return gvk.Version } +-func (gvk kindKey) GetKind() string { return gvk.Kind } +-func (gvk kindKey) GetGVK() schema.GroupVersionKind { return schema.GroupVersionKind(gvk) } +-func (gvk kindKey) EqualsGVK(kind KindKey, respectVersion bool) bool { +- // Make sure kind and group match, otherwise return false +- if gvk.GetKind() != kind.GetKind() || gvk.GetGroup() != kind.GetGroup() { +- return false +- } +- // If we allow version mismatches (i.e. 
don't need to respect the version), return true +- if !respectVersion { +- return true +- } +- // Otherwise, return true if the version also is the same +- return gvk.GetVersion() == kind.GetVersion() +-} +-func (gvk kindKey) String() string { return gvk.GetGVK().String() } +- +-// kindKey implements KindKey. +-var _ KindKey = kindKey{} +- +-type KindKey interface { +- // String implements fmt.Stringer +- String() string +- +- GetGroup() string +- GetVersion() string +- GetKind() string +- GetGVK() schema.GroupVersionKind +- +- EqualsGVK(kind KindKey, respectVersion bool) bool +-} +- +-type ObjectKey interface { +- KindKey +- runtime.Identifyable +-} +- +-// objectKey implements ObjectKey. +-var _ ObjectKey = &objectKey{} +- +-type objectKey struct { +- KindKey +- runtime.Identifyable +-} +- +-func (key objectKey) String() string { return key.KindKey.String() + " " + key.GetIdentifier() } +- +-func NewKindKey(gvk schema.GroupVersionKind) KindKey { +- return kindKey(gvk) +-} +- +-func NewObjectKey(kind KindKey, id runtime.Identifyable) ObjectKey { +- return objectKey{kind, id} +-} +diff --git a/pkg/storage/kube/namespaces.go b/pkg/storage/kube/namespaces.go +new file mode 100644 +index 0000000..3e509ce +--- /dev/null ++++ b/pkg/storage/kube/namespaces.go +@@ -0,0 +1,111 @@ ++package kube ++ ++import ( ++ "sync" ++ ++ "github.com/weaveworks/libgitops/pkg/storage/backend" ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++ "k8s.io/apimachinery/pkg/api/meta" ++ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ++ "k8s.io/apimachinery/pkg/runtime/schema" ++) ++ ++// TODO: Make an example component that iterates through all of a raw.Storage's ++// or FileFinder's objects, and just reads them, converts them into the current ++// hub version. 
++ ++// TODO: Make a composite Storage that encrypts secrets using a key ++ ++// NewNamespaceEnforcer returns a backend.NamespaceEnforcer that ++// enforces namespacing rules (approximately) in the same way as ++// Kubernetes itself does. The following rules are applied: ++// ++// if object is namespaced { ++// if .metadata.namespace == "" { ++// .metadata.namespace = "default" ++// } else { // .metadata.namespace != "" ++// Make sure that such a v1.Namespace object ++// exists in the system. ++// } ++// } else { // object is non-namespaced ++// if .metadata.namespace != "" { ++// .metadata.namespace = "" ++// } ++// } ++// ++// Underneath, backend.GenericNamespaceEnforcer is used. Refer ++// to the documentation of that if you want the functionality ++// to be slightly different. (e.g. any namespace value is valid). ++// ++// TODO: Maybe we want to validate the namespace string itself? ++func NewNamespaceEnforcer() backend.NamespaceEnforcer { ++ return backend.GenericNamespaceEnforcer{ ++ DefaultNamespace: metav1.NamespaceDefault, ++ NamespaceGroupKind: &core.GroupKind{ ++ Group: "", // legacy name for the core API group ++ Kind: "Namespace", ++ }, ++ } ++} ++ ++// SimpleRESTMapper is a subset of the meta.RESTMapper interface ++type SimpleRESTMapper interface { ++ // RESTMapping identifies a preferred resource mapping for the provided group kind. ++ RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) ++} ++ ++// RESTMapperToNamespacer implements the Namespacer interface by fetching (and caching) data ++// from the given RESTMapper interface, that is compatible with any meta.RESTMapper implementation. ++// This allows you to e.g. 
pass in a meta.RESTMapper yielded from ++// sigs.k8s.io/controller-runtime/pkg/client/apiutil.NewDiscoveryRESTMapper(c *rest.Config), or ++// k8s.io/client-go/restmapper.NewDiscoveryRESTMapper(groups []*restmapper.APIGroupResources) ++// in order to look up namespacing information from either a running API server, or statically, from ++// the list of restmapper.APIGroupResources. ++func RESTMapperToNamespacer(mapper SimpleRESTMapper) core.Namespacer { ++ return &restNamespacer{ ++ mapper: mapper, ++ mappingByType: make(map[schema.GroupKind]*meta.RESTMapping), ++ mu: &sync.RWMutex{}, ++ } ++} ++ ++var _ core.Namespacer = &restNamespacer{} ++ ++type restNamespacer struct { ++ mapper SimpleRESTMapper ++ ++ mappingByType map[schema.GroupKind]*meta.RESTMapping ++ mu *sync.RWMutex ++} ++ ++func (n *restNamespacer) IsNamespaced(gk schema.GroupKind) (bool, error) { ++ m, err := n.getMapping(gk) ++ if err != nil { ++ return false, err ++ } ++ return mappingNamespaced(m), nil ++} ++ ++func (n *restNamespacer) getMapping(gk schema.GroupKind) (*meta.RESTMapping, error) { ++ n.mu.RLock() ++ mapping, ok := n.mappingByType[gk] ++ n.mu.RUnlock() ++ // If already cached, we're ok ++ if ok { ++ return mapping, nil ++ } ++ ++ // Write the mapping info to our cache ++ n.mu.Lock() ++ defer n.mu.Unlock() ++ m, err := n.mapper.RESTMapping(gk) ++ if err != nil { ++ return nil, err ++ } ++ n.mappingByType[gk] = m ++ return m, nil ++} ++ ++func mappingNamespaced(mapping *meta.RESTMapping) bool { ++ return mapping.Scope.Name() == meta.RESTScopeNameNamespace ++} +diff --git a/pkg/storage/mappedrawstorage.go b/pkg/storage/mappedrawstorage.go +deleted file mode 100644 +index d41641c..0000000 +--- a/pkg/storage/mappedrawstorage.go ++++ /dev/null +@@ -1,177 +0,0 @@ +-package storage +- +-import ( +- "fmt" +- "io/ioutil" +- "os" +- "path/filepath" +- "sync" +- +- log "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/serializer" +- "github.com/weaveworks/libgitops/pkg/util" +-) 
+- +-var ( +- // ErrNotTracked is returned when the requested resource wasn't found. +- ErrNotTracked = fmt.Errorf("untracked object: %w", ErrNotFound) +-) +- +-// MappedRawStorage is an interface for RawStorages which store their +-// data in a flat/unordered directory format like manifest directories. +-type MappedRawStorage interface { +- RawStorage +- +- // AddMapping binds a Key's virtual path to a physical file path +- AddMapping(key ObjectKey, path string) +- // RemoveMapping removes the physical file +- // path mapping matching the given Key +- RemoveMapping(key ObjectKey) +- +- // SetMappings overwrites all known mappings +- SetMappings(m map[ObjectKey]string) +-} +- +-func NewGenericMappedRawStorage(dir string) MappedRawStorage { +- return &GenericMappedRawStorage{ +- dir: dir, +- fileMappings: make(map[ObjectKey]string), +- mux: &sync.Mutex{}, +- } +-} +- +-// GenericMappedRawStorage is the default implementation of a MappedRawStorage, +-// it stores files in the given directory via a path translation map. +-type GenericMappedRawStorage struct { +- dir string +- fileMappings map[ObjectKey]string +- mux *sync.Mutex +-} +- +-func (r *GenericMappedRawStorage) realPath(key ObjectKey) (string, error) { +- r.mux.Lock() +- path, ok := r.fileMappings[key] +- r.mux.Unlock() +- if !ok { +- return "", fmt.Errorf("GenericMappedRawStorage: cannot resolve %q: %w", key, ErrNotTracked) +- } +- +- return path, nil +-} +- +-// If the file doesn't exist, returns ErrNotFound + ErrNotTracked. 
+-func (r *GenericMappedRawStorage) Read(key ObjectKey) ([]byte, error) { +- file, err := r.realPath(key) +- if err != nil { +- return nil, err +- } +- +- return ioutil.ReadFile(file) +-} +- +-func (r *GenericMappedRawStorage) Exists(key ObjectKey) bool { +- file, err := r.realPath(key) +- if err != nil { +- return false +- } +- +- return util.FileExists(file) +-} +- +-func (r *GenericMappedRawStorage) Write(key ObjectKey, content []byte) error { +- // GenericMappedRawStorage isn't going to generate files itself, +- // only write if the file is already known +- file, err := r.realPath(key) +- if err != nil { +- return err +- } +- +- return ioutil.WriteFile(file, content, 0644) +-} +- +-// If the file doesn't exist, returns ErrNotFound + ErrNotTracked. +-func (r *GenericMappedRawStorage) Delete(key ObjectKey) (err error) { +- file, err := r.realPath(key) +- if err != nil { +- return +- } +- +- // GenericMappedRawStorage files can be deleted +- // externally, check that the file exists first +- if util.FileExists(file) { +- err = os.Remove(file) +- } +- +- if err == nil { +- r.RemoveMapping(key) +- } +- +- return +-} +- +-func (r *GenericMappedRawStorage) List(kind KindKey) ([]ObjectKey, error) { +- result := make([]ObjectKey, 0) +- +- for key := range r.fileMappings { +- // Include objects with the same kind and group, ignore version mismatches +- if key.EqualsGVK(kind, false) { +- result = append(result, key) +- } +- } +- +- return result, nil +-} +- +-// This returns the modification time as a UnixNano string. +-// If the file doesn't exist, returns ErrNotFound + ErrNotTracked. 
+-func (r *GenericMappedRawStorage) Checksum(key ObjectKey) (string, error) { +- path, err := r.realPath(key) +- if err != nil { +- return "", err +- } +- +- return checksumFromModTime(path) +-} +- +-func (r *GenericMappedRawStorage) ContentType(key ObjectKey) (ct serializer.ContentType) { +- if file, err := r.realPath(key); err == nil { +- ct = ContentTypes[filepath.Ext(file)] // Retrieve the correct format based on the extension +- } +- +- return +-} +- +-func (r *GenericMappedRawStorage) WatchDir() string { +- return r.dir +-} +- +-func (r *GenericMappedRawStorage) GetKey(path string) (ObjectKey, error) { +- for key, p := range r.fileMappings { +- if p == path { +- return key, nil +- } +- } +- +- return objectKey{}, fmt.Errorf("no mapping found for path %q", path) +-} +- +-func (r *GenericMappedRawStorage) AddMapping(key ObjectKey, path string) { +- log.Debugf("GenericMappedRawStorage: AddMapping: %q -> %q", key, path) +- r.mux.Lock() +- r.fileMappings[key] = path +- r.mux.Unlock() +-} +- +-func (r *GenericMappedRawStorage) RemoveMapping(key ObjectKey) { +- log.Debugf("GenericMappedRawStorage: RemoveMapping: %q", key) +- r.mux.Lock() +- delete(r.fileMappings, key) +- r.mux.Unlock() +-} +- +-func (r *GenericMappedRawStorage) SetMappings(m map[ObjectKey]string) { +- log.Debugf("GenericMappedRawStorage: SetMappings: %v", m) +- r.mux.Lock() +- r.fileMappings = m +- r.mux.Unlock() +-} +diff --git a/pkg/storage/rawstorage.go b/pkg/storage/rawstorage.go +deleted file mode 100644 +index 9330433..0000000 +--- a/pkg/storage/rawstorage.go ++++ /dev/null +@@ -1,217 +0,0 @@ +-package storage +- +-import ( +- "fmt" +- "io/ioutil" +- "os" +- "path" +- "path/filepath" +- "strconv" +- "strings" +- +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/serializer" +- "github.com/weaveworks/libgitops/pkg/util" +- "k8s.io/apimachinery/pkg/runtime/schema" +-) +- +-// RawStorage is a Key-indexed low-level interface to +-// store byte-encoded Objects 
(resources) in non-volatile +-// memory. +-type RawStorage interface { +- // Read returns a resource's content based on key. +- // If the resource does not exist, it returns ErrNotFound. +- Read(key ObjectKey) ([]byte, error) +- // Exists checks if the resource indicated by key exists. +- Exists(key ObjectKey) bool +- // Write writes the given content to the resource indicated by key. +- // Error returns are implementation-specific. +- Write(key ObjectKey, content []byte) error +- // Delete deletes the resource indicated by key. +- // If the resource does not exist, it returns ErrNotFound. +- Delete(key ObjectKey) error +- // List returns all matching object keys based on the given KindKey. +- List(key KindKey) ([]ObjectKey, error) +- // Checksum returns a string checksum for the resource indicated by key. +- // If the resource does not exist, it returns ErrNotFound. +- Checksum(key ObjectKey) (string, error) +- // ContentType returns the content type of the contents of the resource indicated by key. +- ContentType(key ObjectKey) serializer.ContentType +- +- // WatchDir returns the path for Watchers to watch changes in. +- WatchDir() string +- // GetKey retrieves the Key containing the virtual path based +- // on the given physical file path returned by a Watcher. +- GetKey(path string) (ObjectKey, error) +-} +- +-func NewGenericRawStorage(dir string, gv schema.GroupVersion, ct serializer.ContentType) RawStorage { +- ext := extForContentType(ct) +- if ext == "" { +- panic("Invalid content type") +- } +- return &GenericRawStorage{ +- dir: dir, +- gv: gv, +- ct: ct, +- ext: ext, +- } +-} +- +-// GenericRawStorage is a rawstorage which stores objects as JSON files on disk, +-// in the form: ///metadata.json. 
+-// The GenericRawStorage only supports one GroupVersion at a time, and will error if given +-// any other resources +-type GenericRawStorage struct { +- dir string +- gv schema.GroupVersion +- ct serializer.ContentType +- ext string +-} +- +-func (r *GenericRawStorage) keyPath(key ObjectKey) string { +- return path.Join(r.dir, key.GetKind(), key.GetIdentifier(), fmt.Sprintf("metadata%s", r.ext)) +-} +- +-func (r *GenericRawStorage) kindKeyPath(kindKey KindKey) string { +- return path.Join(r.dir, kindKey.GetKind()) +-} +- +-func (r *GenericRawStorage) validateGroupVersion(kind KindKey) error { +- if r.gv.Group == kind.GetGroup() && r.gv.Version == kind.GetVersion() { +- return nil +- } +- +- return fmt.Errorf("GroupVersion %s/%s not supported by this GenericRawStorage", kind.GetGroup(), kind.GetVersion()) +-} +- +-func (r *GenericRawStorage) Read(key ObjectKey) ([]byte, error) { +- // Validate GroupVersion first +- if err := r.validateGroupVersion(key); err != nil { +- return nil, err +- } +- +- // Check if the resource indicated by key exists +- if !r.Exists(key) { +- return nil, ErrNotFound +- } +- +- return ioutil.ReadFile(r.keyPath(key)) +-} +- +-func (r *GenericRawStorage) Exists(key ObjectKey) bool { +- // Validate GroupVersion first +- if err := r.validateGroupVersion(key); err != nil { +- return false +- } +- +- return util.FileExists(r.keyPath(key)) +-} +- +-func (r *GenericRawStorage) Write(key ObjectKey, content []byte) error { +- // Validate GroupVersion first +- if err := r.validateGroupVersion(key); err != nil { +- return err +- } +- +- file := r.keyPath(key) +- +- // Create the underlying directories if they do not exist already +- if !r.Exists(key) { +- if err := os.MkdirAll(path.Dir(file), 0755); err != nil { +- return err +- } +- } +- +- return ioutil.WriteFile(file, content, 0644) +-} +- +-func (r *GenericRawStorage) Delete(key ObjectKey) error { +- // Validate GroupVersion first +- if err := r.validateGroupVersion(key); err != nil { +- return 
err +- } +- +- // Check if the resource indicated by key exists +- if !r.Exists(key) { +- return ErrNotFound +- } +- +- return os.RemoveAll(path.Dir(r.keyPath(key))) +-} +- +-func (r *GenericRawStorage) List(kind KindKey) ([]ObjectKey, error) { +- // Validate GroupVersion first +- if err := r.validateGroupVersion(kind); err != nil { +- return nil, err +- } +- +- entries, err := ioutil.ReadDir(r.kindKeyPath(kind)) +- if err != nil { +- return nil, err +- } +- +- result := make([]ObjectKey, 0, len(entries)) +- for _, entry := range entries { +- result = append(result, NewObjectKey(kind, runtime.NewIdentifier(entry.Name()))) +- } +- +- return result, nil +-} +- +-// This returns the modification time as a UnixNano string +-// If the file doesn't exist, return ErrNotFound +-func (r *GenericRawStorage) Checksum(key ObjectKey) (string, error) { +- // Validate GroupVersion first +- if err := r.validateGroupVersion(key); err != nil { +- return "", err +- } +- +- // Check if the resource indicated by key exists +- if !r.Exists(key) { +- return "", ErrNotFound +- } +- +- return checksumFromModTime(r.keyPath(key)) +-} +- +-func (r *GenericRawStorage) ContentType(_ ObjectKey) serializer.ContentType { +- return r.ct +-} +- +-func (r *GenericRawStorage) WatchDir() string { +- return r.dir +-} +- +-func (r *GenericRawStorage) GetKey(p string) (ObjectKey, error) { +- splitDir := strings.Split(filepath.Clean(r.dir), string(os.PathSeparator)) +- splitPath := strings.Split(filepath.Clean(p), string(os.PathSeparator)) +- +- if len(splitPath) < len(splitDir)+2 { +- return nil, fmt.Errorf("path not long enough: %s", p) +- } +- +- for i := 0; i < len(splitDir); i++ { +- if splitDir[i] != splitPath[i] { +- return nil, fmt.Errorf("path has wrong base: %s", p) +- } +- } +- kind := splitPath[len(splitDir)] +- uid := splitPath[len(splitDir)+1] +- gvk := schema.GroupVersionKind{ +- Group: r.gv.Group, +- Version: r.gv.Version, +- Kind: kind, +- } +- +- return NewObjectKey(NewKindKey(gvk), 
runtime.NewIdentifier(uid)), nil +-} +- +-func checksumFromModTime(path string) (string, error) { +- fi, err := os.Stat(path) +- if err != nil { +- return "", err +- } +- +- return strconv.FormatInt(fi.ModTime().UnixNano(), 10), nil +-} +diff --git a/pkg/storage/storage.go b/pkg/storage/storage.go +deleted file mode 100644 +index 4d94232..0000000 +--- a/pkg/storage/storage.go ++++ /dev/null +@@ -1,454 +0,0 @@ +-package storage +- +-import ( +- "bytes" +- "errors" +- "fmt" +- "io" +- +- "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/filter" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/serializer" +- patchutil "github.com/weaveworks/libgitops/pkg/util/patch" +- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +- kruntime "k8s.io/apimachinery/pkg/runtime" +- "k8s.io/apimachinery/pkg/runtime/schema" +-) +- +-var ( +- // ErrAmbiguousFind is returned when the user requested one object from a List+Filter process. +- ErrAmbiguousFind = errors.New("two or more results were aquired when one was expected") +- // ErrNotFound is returned when the requested resource wasn't found. +- ErrNotFound = errors.New("resource not found") +- // ErrAlreadyExists is returned when when WriteStorage.Create is called for an already stored object. +- ErrAlreadyExists = errors.New("resource already exists") +-) +- +-type ReadStorage interface { +- // Get returns a new Object for the resource at the specified kind/uid path, based on the file content. +- // If the resource referred to by the given ObjectKey does not exist, Get returns ErrNotFound. +- Get(key ObjectKey) (runtime.Object, error) +- +- // List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package +- // for more information, e.g. 
filter.NameFilter{} and filter.UIDFilter{}) +- List(kind KindKey, opts ...filter.ListOption) ([]runtime.Object, error) +- +- // Find does a List underneath, also using filters, but always returns one object. If the List +- // underneath returned two or more results, ErrAmbiguousFind is returned. If no match was found, +- // ErrNotFound is returned. +- Find(kind KindKey, opts ...filter.ListOption) (runtime.Object, error) +- +- // +- // Partial object getters. +- // TODO: Figure out what we should do with these, do we need them and if so where? +- // +- +- // GetMeta returns a new Object's APIType representation for the resource at the specified kind/uid path. +- // If the resource referred to by the given ObjectKey does not exist, GetMeta returns ErrNotFound. +- GetMeta(key ObjectKey) (runtime.PartialObject, error) +- // ListMeta lists all Objects' APIType representation. In other words, +- // only metadata about each Object is unmarshalled (uid/name/kind/apiVersion). +- // This allows for faster runs (no need to unmarshal "the world"), and less +- // resource usage, when only metadata is unmarshalled into memory +- ListMeta(kind KindKey) ([]runtime.PartialObject, error) +- +- // +- // Cache-related methods. +- // +- +- // Checksum returns a string representing the state of an Object on disk +- // The checksum should change if any modifications have been made to the +- // Object on disk, it can be e.g. the Object's modification timestamp or +- // calculated checksum. If the Object is not found, ErrNotFound is returned. +- Checksum(key ObjectKey) (string, error) +- // Count returns the amount of available Objects of a specific kind +- // This is used by Caches to check if all Objects are cached to perform a List +- Count(kind KindKey) (uint64, error) +- +- // +- // Access to underlying Resources. 
+- // +- +- // RawStorage returns the RawStorage instance backing this Storage +- RawStorage() RawStorage +- // Serializer returns the serializer +- Serializer() serializer.Serializer +- +- // +- // Misc methods. +- // +- +- // ObjectKeyFor returns the ObjectKey for the given object +- ObjectKeyFor(obj runtime.Object) (ObjectKey, error) +- // Close closes all underlying resources (e.g. goroutines) used; before the application exits +- Close() error +-} +- +-type WriteStorage interface { +- // Create creates an entry for and stores the given Object in the storage. The Object must be new to the storage. +- // The ObjectMeta.CreationTimestamp field is set automatically to the current time if it is unset. +- Create(obj runtime.Object) error +- // Update updates the state of the given Object in the storage. The Object must exist in the storage. +- // The ObjectMeta.CreationTimestamp field is set automatically to the current time if it is unset. +- Update(obj runtime.Object) error +- +- // Patch performs a strategic merge patch on the Object with the given UID, using the byte-encoded patch given +- Patch(key ObjectKey, patch []byte) error +- // Delete removes an Object from the storage +- Delete(key ObjectKey) error +-} +- +-// Storage is an interface for persisting and retrieving API objects to/from a backend +-// One Storage instance handles all different Kinds of Objects +-type Storage interface { +- ReadStorage +- WriteStorage +-} +- +-// NewGenericStorage constructs a new Storage +-func NewGenericStorage(rawStorage RawStorage, serializer serializer.Serializer, identifiers []runtime.IdentifierFactory) Storage { +- return &GenericStorage{rawStorage, serializer, patchutil.NewPatcher(serializer), identifiers} +-} +- +-// GenericStorage implements the Storage interface +-type GenericStorage struct { +- raw RawStorage +- serializer serializer.Serializer +- patcher patchutil.Patcher +- identifiers []runtime.IdentifierFactory +-} +- +-var _ Storage = &GenericStorage{} +- 
+-func (s *GenericStorage) Serializer() serializer.Serializer { +- return s.serializer +-} +- +-// Get returns a new Object for the resource at the specified kind/uid path, based on the file content +-func (s *GenericStorage) Get(key ObjectKey) (runtime.Object, error) { +- content, err := s.raw.Read(key) +- if err != nil { +- return nil, err +- } +- +- return s.decode(key, content) +-} +- +-// TODO: Verify this works +-// GetMeta returns a new Object's APIType representation for the resource at the specified kind/uid path +-func (s *GenericStorage) GetMeta(key ObjectKey) (runtime.PartialObject, error) { +- content, err := s.raw.Read(key) +- if err != nil { +- return nil, err +- } +- +- return s.decodeMeta(key, content) +-} +- +-// TODO: Make sure we don't save a partial object +-func (s *GenericStorage) write(key ObjectKey, obj runtime.Object) error { +- // Set the content type based on the format given by the RawStorage, but default to JSON +- contentType := serializer.ContentTypeJSON +- if ct := s.raw.ContentType(key); len(ct) != 0 { +- contentType = ct +- } +- +- // Set creationTimestamp if not already populated +- t := obj.GetCreationTimestamp() +- if t.IsZero() { +- obj.SetCreationTimestamp(metav1.Now()) +- } +- +- var objBytes bytes.Buffer +- err := s.serializer.Encoder().Encode(serializer.NewFrameWriter(contentType, &objBytes), obj) +- if err != nil { +- return err +- } +- +- return s.raw.Write(key, objBytes.Bytes()) +-} +- +-func (s *GenericStorage) Create(obj runtime.Object) error { +- key, err := s.ObjectKeyFor(obj) +- if err != nil { +- return err +- } +- +- if s.raw.Exists(key) { +- return ErrAlreadyExists +- } +- +- // The object was not found so we can safely create it +- return s.write(key, obj) +-} +- +-func (s *GenericStorage) Update(obj runtime.Object) error { +- key, err := s.ObjectKeyFor(obj) +- if err != nil { +- return err +- } +- +- if !s.raw.Exists(key) { +- return ErrNotFound +- } +- +- // The object was found so we can safely update it +- 
return s.write(key, obj) +-} +- +-// Patch performs a strategic merge patch on the object with the given UID, using the byte-encoded patch given +-func (s *GenericStorage) Patch(key ObjectKey, patch []byte) error { +- oldContent, err := s.raw.Read(key) +- if err != nil { +- return err +- } +- +- newContent, err := s.patcher.Apply(oldContent, patch, key.GetGVK()) +- if err != nil { +- return err +- } +- +- return s.raw.Write(key, newContent) +-} +- +-// Delete removes an Object from the storage +-func (s *GenericStorage) Delete(key ObjectKey) error { +- return s.raw.Delete(key) +-} +- +-// Checksum returns a string representing the state of an Object on disk +-func (s *GenericStorage) Checksum(key ObjectKey) (string, error) { +- return s.raw.Checksum(key) +-} +- +-func (s *GenericStorage) list(kind KindKey) (result []runtime.Object, walkerr error) { +- walkerr = s.walkKind(kind, func(key ObjectKey, content []byte) error { +- obj, err := s.decode(key, content) +- if err != nil { +- return err +- } +- +- result = append(result, obj) +- return nil +- }) +- return +-} +- +-// List lists Objects for the specific kind. Optionally, filters can be applied (see the filter package +-// for more information, e.g. filter.NameFilter{} and filter.UIDFilter{}) +-func (s *GenericStorage) List(kind KindKey, opts ...filter.ListOption) ([]runtime.Object, error) { +- // First, complete the options struct +- o, err := filter.MakeListOptions(opts...) +- if err != nil { +- return nil, err +- } +- +- // Do an internal list to get all objects +- objs, err := s.list(kind) +- if err != nil { +- return nil, err +- } +- +- // For all list filters, pipe the output of the previous as the input to the next, in order. +- for _, filter := range o.Filters { +- objs, err = filter.Filter(objs...) +- if err != nil { +- return nil, err +- } +- } +- return objs, nil +-} +- +-// Find does a List underneath, also using filters, but always returns one object. 
If the List +-// underneath returned two or more results, ErrAmbiguousFind is returned. If no match was found, +-// ErrNotFound is returned. +-func (s *GenericStorage) Find(kind KindKey, opts ...filter.ListOption) (runtime.Object, error) { +- // Do a normal list underneath +- objs, err := s.List(kind, opts...) +- if err != nil { +- return nil, err +- } +- // Return based on the object count +- switch l := len(objs); l { +- case 0: +- return nil, fmt.Errorf("no Find match found: %w", ErrNotFound) +- case 1: +- return objs[0], nil +- default: +- return nil, fmt.Errorf("too many (%d) matches: %v: %w", l, objs, ErrAmbiguousFind) +- } +-} +- +-// ListMeta lists all Objects' APIType representation. In other words, +-// only metadata about each Object is unmarshalled (uid/name/kind/apiVersion). +-// This allows for faster runs (no need to unmarshal "the world"), and less +-// resource usage, when only metadata is unmarshalled into memory +-func (s *GenericStorage) ListMeta(kind KindKey) (result []runtime.PartialObject, walkerr error) { +- walkerr = s.walkKind(kind, func(key ObjectKey, content []byte) error { +- +- obj, err := s.decodeMeta(key, content) +- if err != nil { +- return err +- } +- +- result = append(result, obj) +- return nil +- }) +- return +-} +- +-// Count counts the Objects for the specific kind +-func (s *GenericStorage) Count(kind KindKey) (uint64, error) { +- entries, err := s.raw.List(kind) +- return uint64(len(entries)), err +-} +- +-func (s *GenericStorage) ObjectKeyFor(obj runtime.Object) (ObjectKey, error) { +- var gvk schema.GroupVersionKind +- var err error +- +- _, isPartialObject := obj.(runtime.PartialObject) +- if isPartialObject { +- gvk = obj.GetObjectKind().GroupVersionKind() +- // TODO: Error if empty +- } else { +- gvk, err = serializer.GVKForObject(s.serializer.Scheme(), obj) +- if err != nil { +- return nil, err +- } +- } +- +- id := s.identify(obj) +- if id == nil { +- return nil, fmt.Errorf("couldn't identify object") +- } +- return 
NewObjectKey(NewKindKey(gvk), id), nil +-} +- +-// RawStorage returns the RawStorage instance backing this Storage +-func (s *GenericStorage) RawStorage() RawStorage { +- return s.raw +-} +- +-// Close closes all underlying resources (e.g. goroutines) used; before the application exits +-func (s *GenericStorage) Close() error { +- return nil // nothing to do here for GenericStorage +-} +- +-// identify loops through the identifiers, in priority order, to identify the object correctly +-func (s *GenericStorage) identify(obj runtime.Object) runtime.Identifyable { +- for _, identifier := range s.identifiers { +- +- id, ok := identifier.Identify(obj) +- if ok { +- return id +- } +- } +- return nil +-} +- +-func (s *GenericStorage) decode(key ObjectKey, content []byte) (runtime.Object, error) { +- gvk := key.GetGVK() +- // Decode the bytes to the internal version of the Object, if desired +- isInternal := gvk.Version == kruntime.APIVersionInternal +- +- // Decode the bytes into an Object +- ct := s.raw.ContentType(key) +- logrus.Infof("Decoding with content type %s", ct) +- obj, err := s.serializer.Decoder( +- serializer.WithConvertToHubDecode(isInternal), +- ).Decode(serializer.NewFrameReader(ct, serializer.FromBytes(content))) +- if err != nil { +- return nil, err +- } +- +- // Cast to runtime.Object, and make sure it works +- metaObj, ok := obj.(runtime.Object) +- if !ok { +- return nil, fmt.Errorf("can't convert to libgitops.runtime.Object") +- } +- +- // Set the desired gvk of this Object from the caller +- metaObj.GetObjectKind().SetGroupVersionKind(gvk) +- return metaObj, nil +-} +- +-func (s *GenericStorage) decodeMeta(key ObjectKey, content []byte) (runtime.PartialObject, error) { +- gvk := key.GetGVK() +- partobjs, err := DecodePartialObjects(serializer.FromBytes(content), s.serializer.Scheme(), false, &gvk) +- if err != nil { +- return nil, err +- } +- +- return partobjs[0], nil +-} +- +-func (s *GenericStorage) walkKind(kind KindKey, fn func(key ObjectKey, 
content []byte) error) error { +- keys, err := s.raw.List(kind) +- if err != nil { +- return err +- } +- +- for _, key := range keys { +- // Allow metadata.json to not exist, although the directory does exist +- if !s.raw.Exists(key) { +- continue +- } +- +- content, err := s.raw.Read(key) +- if err != nil { +- return err +- } +- +- if err := fn(key, content); err != nil { +- return err +- } +- } +- +- return nil +-} +- +-// DecodePartialObjects reads any set of frames from the given ReadCloser, decodes the frames into +-// PartialObjects, validates that the decoded objects are known to the scheme, and optionally sets a default +-// group +-func DecodePartialObjects(rc io.ReadCloser, scheme *kruntime.Scheme, allowMultiple bool, defaultGVK *schema.GroupVersionKind) ([]runtime.PartialObject, error) { +- fr := serializer.NewYAMLFrameReader(rc) +- +- frames, err := serializer.ReadFrameList(fr) +- if err != nil { +- return nil, err +- } +- +- // If we only allow one frame, signal that early +- if !allowMultiple && len(frames) != 1 { +- return nil, fmt.Errorf("DecodePartialObjects: unexpected number of frames received from ReadCloser: %d expected 1", len(frames)) +- } +- +- objs := make([]runtime.PartialObject, 0, len(frames)) +- for _, frame := range frames { +- partobj, err := runtime.NewPartialObject(frame) +- if err != nil { +- return nil, err +- } +- +- gvk := partobj.GetObjectKind().GroupVersionKind() +- +- // Don't decode API objects unknown to the scheme (e.g. Kubernetes manifests) +- if !scheme.Recognizes(gvk) { +- // TODO: Typed error +- return nil, fmt.Errorf("unknown GroupVersionKind: %s", partobj.GetObjectKind().GroupVersionKind()) +- } +- +- if defaultGVK != nil { +- // Set the desired gvk from the caller of this Object, if defaultGVK is set +- // In practice, this means, although we got an external type, +- // we might want internal Objects later in the client. 
Hence, +- // set the right expectation here +- partobj.GetObjectKind().SetGroupVersionKind(gvk) +- } +- +- objs = append(objs, partobj) +- } +- return objs, nil +-} +diff --git a/pkg/storage/sync/storage.go b/pkg/storage/sync/storage.go +deleted file mode 100644 +index 458f7fa..0000000 +--- a/pkg/storage/sync/storage.go ++++ /dev/null +@@ -1,188 +0,0 @@ +-package sync +- +-/* +- +-TODO: Revisit if we need this file/package in the future. +- +-import ( +- "fmt" +- +- log "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/storage" +- "github.com/weaveworks/libgitops/pkg/storage/watch" +- "github.com/weaveworks/libgitops/pkg/storage/watch/update" +- "github.com/weaveworks/libgitops/pkg/util/sync" +-) +- +-const updateBuffer = 4096 // How many updates to buffer, 4096 should be enough for even a high update frequency +- +-// SyncStorage is a Storage implementation taking in multiple Storages and +-// keeping them in sync. Any write operation executed on the SyncStorage +-// is propagated to all of the Storages it manages (including the embedded +-// one). For any retrieval or generation operation, the embedded Storage +-// will be used (it is treated as read-write). As all other Storages only +-// receive write operations, they can be thought of as write-only. +-type SyncStorage struct { +- storage.Storage +- storages []storage.Storage +- inboundStream update.UpdateStream +- outboundStream update.UpdateStream +- monitor *sync.Monitor +-} +- +-// SyncStorage implements update.EventStorage. 
+-var _ update.EventStorage = &SyncStorage{} +- +-// NewSyncStorage constructs a new SyncStorage +-func NewSyncStorage(rwStorage storage.Storage, wStorages ...storage.Storage) storage.Storage { +- ss := &SyncStorage{ +- Storage: rwStorage, +- storages: append(wStorages, rwStorage), +- } +- +- for _, s := range ss.storages { +- if watchStorage, ok := s.(watch.WatchStorage); ok { +- // Populate eventStream if we found a watchstorage +- if ss.inboundStream == nil { +- ss.inboundStream = make(update.UpdateStream, updateBuffer) +- } +- watchStorage.SetUpdateStream(ss.inboundStream) +- } +- } +- +- if ss.inboundStream != nil { +- ss.monitor = sync.RunMonitor(ss.monitorFunc) +- ss.outboundStream = make(update.UpdateStream, updateBuffer) +- } +- +- return ss +-} +- +-// Set is propagated to all Storages +-func (ss *SyncStorage) Set(obj runtime.Object) error { +- return ss.runAll(func(s storage.Storage) error { +- return s.Set(obj) +- }) +-} +- +-// Patch is propagated to all Storages +-func (ss *SyncStorage) Patch(key storage.ObjectKey, patch []byte) error { +- return ss.runAll(func(s storage.Storage) error { +- return s.Patch(key, patch) +- }) +-} +- +-// Delete is propagated to all Storages +-func (ss *SyncStorage) Delete(key storage.ObjectKey) error { +- return ss.runAll(func(s storage.Storage) error { +- return s.Delete(key) +- }) +-} +- +-func (ss *SyncStorage) Close() error { +- // Close all WatchStorages +- for _, s := range ss.storages { +- if watchStorage, ok := s.(watch.WatchStorage); ok { +- _ = watchStorage.Close() +- } +- } +- +- // Close the event streams if set +- if ss.inboundStream != nil { +- close(ss.inboundStream) +- } +- if ss.outboundStream != nil { +- close(ss.outboundStream) +- } +- // Wait for the monitor goroutine +- ss.monitor.Wait() +- return nil +-} +- +-func (ss *SyncStorage) GetUpdateStream() update.UpdateStream { +- return ss.outboundStream +-} +- +-// runAll runs the given function for all Storages in parallel and aggregates all errors 
+-func (ss *SyncStorage) runAll(f func(storage.Storage) error) (err error) { +- type result struct { +- int +- error +- } +- +- errC := make(chan result) +- for i, s := range ss.storages { +- go func(i int, s storage.Storage) { +- errC <- result{i, f(s)} +- }(i, s) // NOTE: This requires i and s as arguments, otherwise they will be evaluated for one Storage only +- } +- +- for i := 0; i < len(ss.storages); i++ { +- if result := <-errC; result.error != nil { +- if err == nil { +- err = fmt.Errorf("SyncStorage: Error in Storage %d: %v", result.int, result.error) +- } else { +- err = fmt.Errorf("%v\n%29s %d: %v", err, "and error in Storage", result.int, result.error) +- } +- } +- } +- +- return +-} +- +-func (ss *SyncStorage) monitorFunc() { +- log.Debug("SyncStorage: Monitoring thread started") +- defer log.Debug("SyncStorage: Monitoring thread stopped") +- +- // TODO: Support detecting changes done when the GitOps daemon isn't running +- // This is difficult to do though, as we have don't know which state is the latest +- // For now, only update the state on write when the daemon is running +- for { +- upd, ok := <-ss.inboundStream +- if ok { +- log.Debugf("SyncStorage: Received update %v %t", upd, ok) +- +- gvk := upd.PartialObject.GetObjectKind().GroupVersionKind() +- uid := upd.PartialObject.GetUID() +- key := storage.NewObjectKey(storage.NewKindKey(gvk), runtime.NewIdentifier(string(uid))) +- log.Debugf("SyncStorage: Object has gvk=%q and uid=%q", gvk, uid) +- +- switch upd.Event { +- case update.ObjectEventModify, update.ObjectEventCreate: +- // First load the Object using the Storage given in the update, +- // then set it using the client constructed above +- +- obj, err := upd.Storage.Get(key) +- if err != nil { +- log.Errorf("Failed to get Object with UID %q: %v", upd.PartialObject.GetUID(), err) +- continue +- } +- +- if err = ss.Set(obj); err != nil { +- log.Errorf("Failed to set Object with UID %q: %v", upd.PartialObject.GetUID(), err) +- continue +- } +- 
case update.ObjectEventDelete: +- // For deletion we use the generated "fake" APIType object +- if err := ss.Delete(key); err != nil { +- log.Errorf("Failed to delete Object with UID %q: %v", upd.PartialObject.GetUID(), err) +- continue +- } +- } +- +- // Send the update to the listeners unless the channel is full, +- // in which case issue a warning. The channel can hold as many +- // updates as updateBuffer specifies. +- select { +- case ss.outboundStream <- upd: +- log.Debugf("SyncStorage: Sent update: %v", upd) +- default: +- log.Warn("SyncStorage: Failed to send update, channel full") +- } +- } else { +- return +- } +- } +-} +-*/ +diff --git a/pkg/storage/transaction/commit.go b/pkg/storage/transaction/commit.go +deleted file mode 100644 +index 30e55ae..0000000 +--- a/pkg/storage/transaction/commit.go ++++ /dev/null +@@ -1,79 +0,0 @@ +-package transaction +- +-import ( +- "fmt" +- +- "github.com/fluxcd/go-git-providers/validation" +-) +- +-// CommitResult describes a result of a transaction. +-type CommitResult interface { +- // GetAuthorName describes the author's name (as per git config) +- // +required +- GetAuthorName() string +- // GetAuthorEmail describes the author's email (as per git config) +- // +required +- GetAuthorEmail() string +- // GetTitle describes the change concisely, so it can be used as a commit message or PR title. +- // +required +- GetTitle() string +- // GetDescription contains optional extra information about the change. +- // +optional +- GetDescription() string +- +- // GetMessage returns GetTitle() followed by a newline and GetDescription(), if set. +- GetMessage() string +- // Validate validates that all required fields are set, and given data is valid. +- Validate() error +-} +- +-// GenericCommitResult implements CommitResult. +-var _ CommitResult = &GenericCommitResult{} +- +-// GenericCommitResult implements CommitResult. 
+-type GenericCommitResult struct { +- // AuthorName describes the author's name (as per git config) +- // +required +- AuthorName string +- // AuthorEmail describes the author's email (as per git config) +- // +required +- AuthorEmail string +- // Title describes the change concisely, so it can be used as a commit message or PR title. +- // +required +- Title string +- // Description contains optional extra information about the change. +- // +optional +- Description string +-} +- +-func (r *GenericCommitResult) GetAuthorName() string { +- return r.AuthorName +-} +-func (r *GenericCommitResult) GetAuthorEmail() string { +- return r.AuthorEmail +-} +-func (r *GenericCommitResult) GetTitle() string { +- return r.Title +-} +-func (r *GenericCommitResult) GetDescription() string { +- return r.Description +-} +-func (r *GenericCommitResult) GetMessage() string { +- if len(r.Description) == 0 { +- return r.Title +- } +- return fmt.Sprintf("%s\n%s", r.Title, r.Description) +-} +-func (r *GenericCommitResult) Validate() error { +- v := validation.New("GenericCommitResult") +- if len(r.AuthorName) == 0 { +- v.Required("AuthorName") +- } +- if len(r.AuthorEmail) == 0 { +- v.Required("AuthorEmail") +- } +- if len(r.Title) == 0 { +- v.Required("Title") +- } +- return v.Error() +-} +diff --git a/pkg/storage/transaction/git.go b/pkg/storage/transaction/git.go +deleted file mode 100644 +index efc57ab..0000000 +--- a/pkg/storage/transaction/git.go ++++ /dev/null +@@ -1,161 +0,0 @@ +-package transaction +- +-import ( +- "context" +- "fmt" +- "strings" +- +- "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/gitdir" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/serializer" +- "github.com/weaveworks/libgitops/pkg/storage" +- "github.com/weaveworks/libgitops/pkg/util" +- "github.com/weaveworks/libgitops/pkg/util/watcher" +-) +- +-var excludeDirs = []string{".git"} +- +-func NewGitStorage(gitDir gitdir.GitDirectory, 
prProvider PullRequestProvider, ser serializer.Serializer) (TransactionStorage, error) { +- // Make sure the repo is cloned. If this func has already been called, it will be a no-op. +- if err := gitDir.StartCheckoutLoop(); err != nil { +- return nil, err +- } +- +- raw := storage.NewGenericMappedRawStorage(gitDir.Dir()) +- s := storage.NewGenericStorage(raw, ser, []runtime.IdentifierFactory{runtime.Metav1NameIdentifier}) +- +- gitStorage := &GitStorage{ +- ReadStorage: s, +- s: s, +- raw: raw, +- gitDir: gitDir, +- prProvider: prProvider, +- } +- // Do a first sync now, and then start the background loop +- if err := gitStorage.sync(); err != nil { +- return nil, err +- } +- gitStorage.syncLoop() +- +- return gitStorage, nil +-} +- +-type GitStorage struct { +- storage.ReadStorage +- +- s storage.Storage +- raw storage.MappedRawStorage +- gitDir gitdir.GitDirectory +- prProvider PullRequestProvider +-} +- +-func (s *GitStorage) syncLoop() { +- go func() { +- for { +- if commit, ok := <-s.gitDir.CommitChannel(); ok { +- logrus.Debugf("GitStorage: Got info about commit %q, syncing...", commit) +- if err := s.sync(); err != nil { +- logrus.Errorf("GitStorage: Got sync error: %v", err) +- } +- } +- } +- }() +-} +- +-func (s *GitStorage) sync() error { +- mappings, err := computeMappings(s.gitDir.Dir(), s.s) +- if err != nil { +- return err +- } +- logrus.Debugf("Rewriting the mappings to %v", mappings) +- s.raw.SetMappings(mappings) +- return nil +-} +- +-func (s *GitStorage) Transaction(ctx context.Context, streamName string, fn TransactionFunc) error { +- // Append random bytes to the end of the stream name if it ends with a dash +- if strings.HasSuffix(streamName, "-") { +- suffix, err := util.RandomSHA(4) +- if err != nil { +- return err +- } +- streamName += suffix +- } +- +- // Make sure we have the latest available state +- if err := s.gitDir.Pull(ctx); err != nil { +- return err +- } +- // Make sure no other Git ops can take place during the transaction, wait 
for other ongoing operations. +- s.gitDir.Suspend() +- defer s.gitDir.Resume() +- // Always switch back to the main branch afterwards. +- // TODO ordering of the defers, and return deferred error +- defer func() { _ = s.gitDir.CheckoutMainBranch() }() +- +- // Check out a new branch with the given name +- if err := s.gitDir.CheckoutNewBranch(streamName); err != nil { +- return err +- } +- // Invoke the transaction +- result, err := fn(ctx, s.s) +- if err != nil { +- return err +- } +- // Make sure the result is valid +- if err := result.Validate(); err != nil { +- return fmt.Errorf("transaction result is not valid: %w", err) +- } +- // Perform the commit +- if err := s.gitDir.Commit(ctx, result.GetAuthorName(), result.GetAuthorEmail(), result.GetMessage()); err != nil { +- return err +- } +- // Return if no PR should be made +- prResult, ok := result.(PullRequestResult) +- if !ok { +- return nil +- } +- // If a PR was asked for, and no provider was given, error out +- if s.prProvider == nil { +- return ErrNoPullRequestProvider +- } +- // Create the PR using the provider. +- return s.prProvider.CreatePullRequest(ctx, &GenericPullRequestSpec{ +- PullRequestResult: prResult, +- MainBranch: s.gitDir.MainBranch(), +- MergeBranch: streamName, +- RepositoryRef: s.gitDir.RepositoryRef(), +- }) +-} +- +-func computeMappings(dir string, s storage.Storage) (map[storage.ObjectKey]string, error) { +- validExts := make([]string, 0, len(storage.ContentTypes)) +- for ext := range storage.ContentTypes { +- validExts = append(validExts, ext) +- } +- +- files, err := watcher.WalkDirectoryForFiles(dir, validExts, excludeDirs) +- if err != nil { +- return nil, err +- } +- +- // TODO: Compute the difference between the earlier state, and implement EventStorage so the user +- // can automatically subscribe to changes of objects between versions. 
+- m := map[storage.ObjectKey]string{} +- for _, file := range files { +- partObjs, err := storage.DecodePartialObjects(serializer.FromFile(file), s.Serializer().Scheme(), false, nil) +- if err != nil { +- logrus.Errorf("couldn't decode %q into a partial object: %v", file, err) +- continue +- } +- key, err := s.ObjectKeyFor(partObjs[0]) +- if err != nil { +- logrus.Errorf("couldn't get objectkey for partial object: %v", err) +- continue +- } +- logrus.Debugf("Adding mapping between %s and %q", key, file) +- m[key] = file +- } +- return m, nil +-} +diff --git a/pkg/storage/transaction/pullrequest.go b/pkg/storage/transaction/pullrequest.go +deleted file mode 100644 +index bf0fcf2..0000000 +--- a/pkg/storage/transaction/pullrequest.go ++++ /dev/null +@@ -1,130 +0,0 @@ +-package transaction +- +-import ( +- "context" +- +- "github.com/fluxcd/go-git-providers/gitprovider" +- "github.com/fluxcd/go-git-providers/validation" +-) +- +-// PullRequestResult can be returned from a TransactionFunc instead of a CommitResult, if +-// a PullRequest is desired to be created by the PullRequestProvider. +-type PullRequestResult interface { +- // PullRequestResult is a superset of CommitResult +- CommitResult +- +- // GetLabels specifies what labels should be applied on the PR. +- // +optional +- GetLabels() []string +- // GetAssignees specifies what user login names should be assigned to this PR. +- // Note: Only users with "pull" access or more can be assigned. +- // +optional +- GetAssignees() []string +- // GetMilestone specifies what milestone this should be attached to. +- // +optional +- GetMilestone() string +-} +- +-// GenericPullRequestResult implements PullRequestResult. +-var _ PullRequestResult = &GenericPullRequestResult{} +- +-// GenericPullRequestResult implements PullRequestResult. +-type GenericPullRequestResult struct { +- // GenericPullRequestResult is a superset of a CommitResult. +- CommitResult +- +- // Labels specifies what labels should be applied on the PR. 
+- // +optional +- Labels []string +- // Assignees specifies what user login names should be assigned to this PR. +- // Note: Only users with "pull" access or more can be assigned. +- // +optional +- Assignees []string +- // Milestone specifies what milestone this should be attached to. +- // +optional +- Milestone string +-} +- +-func (r *GenericPullRequestResult) GetLabels() []string { +- return r.Labels +-} +-func (r *GenericPullRequestResult) GetAssignees() []string { +- return r.Assignees +-} +-func (r *GenericPullRequestResult) GetMilestone() string { +- return r.Milestone +-} +-func (r *GenericPullRequestResult) Validate() error { +- v := validation.New("GenericPullRequestResult") +- // Just validate the "inner" object +- v.Append(r.CommitResult.Validate(), r.CommitResult, "CommitResult") +- return v.Error() +-} +- +-// PullRequestSpec is the messaging interface between the TransactionStorage, and the +-// PullRequestProvider. The PullRequestSpec contains all the needed information for creating +-// a Pull Request successfully. +-type PullRequestSpec interface { +- // PullRequestSpec is a superset of PullRequestResult. +- PullRequestResult +- +- // GetMainBranch returns the main branch of the repository. +- // +required +- GetMainBranch() string +- // GetMergeBranch returns the branch that is pending to be merged into main with this PR. +- // +required +- GetMergeBranch() string +- // GetMergeBranch returns the branch that is pending to be merged into main with this PR. +- // +required +- GetRepositoryRef() gitprovider.RepositoryRef +-} +- +-// GenericPullRequestSpec implements PullRequestSpec. +-type GenericPullRequestSpec struct { +- // GenericPullRequestSpec is a superset of PullRequestResult. +- PullRequestResult +- +- // MainBranch returns the main branch of the repository. +- // +required +- MainBranch string +- // MergeBranch returns the branch that is pending to be merged into main with this PR. 
+- // +required +- MergeBranch string +- // RepositoryRef returns the branch that is pending to be merged into main with this PR. +- // +required +- RepositoryRef gitprovider.RepositoryRef +-} +- +-func (r *GenericPullRequestSpec) GetMainBranch() string { +- return r.MainBranch +-} +-func (r *GenericPullRequestSpec) GetMergeBranch() string { +- return r.MergeBranch +-} +-func (r *GenericPullRequestSpec) GetRepositoryRef() gitprovider.RepositoryRef { +- return r.RepositoryRef +-} +-func (r *GenericPullRequestSpec) Validate() error { +- v := validation.New("GenericPullRequestSpec") +- // Just validate the "inner" object +- v.Append(r.PullRequestResult.Validate(), r.PullRequestResult, "PullRequestResult") +- +- if len(r.MainBranch) == 0 { +- v.Required("MainBranch") +- } +- if len(r.MergeBranch) == 0 { +- v.Required("MergeBranch") +- } +- if r.RepositoryRef == nil { +- v.Required("RepositoryRef") +- } +- return v.Error() +-} +- +-// PullRequestProvider is an interface for providers that can create so-called "Pull Requests", +-// as popularized by Git. A Pull Request is a formal ask for a branch to be merged into the main one. +-// It can be UI-based, as in GitHub and GitLab, or it can be using some other method. +-type PullRequestProvider interface { +- // CreatePullRequest creates a Pull Request using the given specification. 
+- CreatePullRequest(ctx context.Context, spec PullRequestSpec) error +-} +diff --git a/pkg/storage/transaction/pullrequest/github/github.go b/pkg/storage/transaction/pullrequest/github/github.go +deleted file mode 100644 +index d8efbd6..0000000 +--- a/pkg/storage/transaction/pullrequest/github/github.go ++++ /dev/null +@@ -1,119 +0,0 @@ +-package github +- +-import ( +- "context" +- "errors" +- "fmt" +- +- "github.com/fluxcd/go-git-providers/github" +- "github.com/fluxcd/go-git-providers/gitprovider" +- gogithub "github.com/google/go-github/v32/github" +- "github.com/weaveworks/libgitops/pkg/storage/transaction" +-) +- +-// TODO: This package should really only depend on go-git-providers' abstraction interface +- +-var ErrProviderNotSupported = errors.New("only the Github go-git-providers provider is supported at the moment") +- +-// NewGitHubPRProvider returns a new transaction.PullRequestProvider from a gitprovider.Client. +-func NewGitHubPRProvider(c gitprovider.Client) (transaction.PullRequestProvider, error) { +- // Make sure a Github client was passed +- if c.ProviderID() != github.ProviderID { +- return nil, ErrProviderNotSupported +- } +- return &prCreator{c}, nil +-} +- +-type prCreator struct { +- c gitprovider.Client +-} +- +-func (c *prCreator) CreatePullRequest(ctx context.Context, spec transaction.PullRequestSpec) error { +- // First, validate the input +- if err := spec.Validate(); err != nil { +- return fmt.Errorf("given PullRequestSpec wasn't valid") +- } +- +- // Use the "raw" go-github client to do this +- ghClient := c.c.Raw().(*gogithub.Client) +- +- // Helper variables +- owner := spec.GetRepositoryRef().GetIdentity() +- repo := spec.GetRepositoryRef().GetRepository() +- var body *string +- if spec.GetDescription() != "" { +- body = gogithub.String(spec.GetDescription()) +- } +- +- // Create the Pull Request +- pr, _, err := ghClient.PullRequests.Create(ctx, owner, repo, &gogithub.NewPullRequest{ +- Head: 
gogithub.String(spec.GetMergeBranch()), +- Base: gogithub.String(spec.GetMainBranch()), +- Title: gogithub.String(spec.GetTitle()), +- Body: body, +- }) +- if err != nil { +- return err +- } +- +- // If spec.GetMilestone() is set, fetch the ID of the milestone +- // Only set milestoneID to non-nil if specified +- var milestoneID *int +- if len(spec.GetMilestone()) != 0 { +- milestoneID, err = getMilestoneID(ctx, ghClient, owner, repo, spec.GetMilestone()) +- if err != nil { +- return err +- } +- } +- +- // Only set assignees to non-nil if specified +- var assignees *[]string +- if a := spec.GetAssignees(); len(a) != 0 { +- assignees = &a +- } +- +- // Only set labels to non-nil if specified +- var labels *[]string +- if l := spec.GetLabels(); len(l) != 0 { +- labels = &l +- } +- +- // Only PATCH the PR if any of the fields were set +- if milestoneID != nil || assignees != nil || labels != nil { +- _, _, err := ghClient.Issues.Edit(ctx, owner, repo, pr.GetNumber(), &gogithub.IssueRequest{ +- Milestone: milestoneID, +- Assignees: assignees, +- Labels: labels, +- }) +- if err != nil { +- return err +- } +- } +- +- return nil +-} +- +-func getMilestoneID(ctx context.Context, c *gogithub.Client, owner, repo, milestoneName string) (*int, error) { +- // List all milestones in the repo +- // TODO: This could/should use pagination +- milestones, _, err := c.Issues.ListMilestones(ctx, owner, repo, &gogithub.MilestoneListOptions{ +- State: "all", +- }) +- if err != nil { +- return nil, err +- } +- // Loop through all milestones, search for one with the right name +- for _, milestone := range milestones { +- // Only consider a milestone with the right name +- if milestone.GetTitle() != milestoneName { +- continue +- } +- // Validate nil to avoid panics +- if milestone.Number == nil { +- return nil, fmt.Errorf("didn't expect milestone Number to be nil: %v", milestone) +- } +- // Return the Milestone number +- return milestone.Number, nil +- } +- return nil, fmt.Errorf("couldn't 
find milestone with name: %s", milestoneName) +-} +diff --git a/pkg/storage/transaction/storage.go b/pkg/storage/transaction/storage.go +deleted file mode 100644 +index 8a60e93..0000000 +--- a/pkg/storage/transaction/storage.go ++++ /dev/null +@@ -1,28 +0,0 @@ +-package transaction +- +-import ( +- "context" +- "errors" +- +- "github.com/weaveworks/libgitops/pkg/storage" +-) +- +-var ( +- ErrAbortTransaction = errors.New("transaction aborted") +- ErrTransactionActive = errors.New("transaction is active") +- ErrNoPullRequestProvider = errors.New("no pull request provider given") +-) +- +-type TransactionFunc func(ctx context.Context, s storage.Storage) (CommitResult, error) +- +-type TransactionStorage interface { +- storage.ReadStorage +- +- // Transaction creates a new "stream" (for Git: branch) with the given name, or +- // prefix if streamName ends with a dash (in that case, a 8-char hash will be appended). +- // The environment is made sure to be as up-to-date as possible before fn executes. When +- // fn executes, the given storage can be used to modify the desired state. If you want to +- // "commit" the changes made in fn, just return nil. If you want to abort, return ErrAbortTransaction. +- // If you want to +- Transaction(ctx context.Context, streamName string, fn TransactionFunc) error +-} +diff --git a/pkg/storage/utils.go b/pkg/storage/utils.go +new file mode 100644 +index 0000000..d45323b +--- /dev/null ++++ b/pkg/storage/utils.go +@@ -0,0 +1,23 @@ ++package storage ++ ++import ( ++ "fmt" ++ ++ "github.com/weaveworks/libgitops/pkg/storage/core" ++) ++ ++// VerifyNamespaced verifies that the given GroupKind and namespace parameter follows ++// the rule of the Namespacer. 
++func VerifyNamespaced(namespacer core.Namespacer, gk core.GroupKind, ns string) error { ++ // Get namespacing info ++ namespaced, err := namespacer.IsNamespaced(gk) ++ if err != nil { ++ return err ++ } ++ if namespaced && ns == "" { ++ return fmt.Errorf("%w: namespaced kind %v requires non-empty namespace", ErrNamespacedMismatch, gk) ++ } else if !namespaced && ns != "" { ++ return fmt.Errorf("%w: non-namespaced kind %v must not have namespace parameter set", ErrNamespacedMismatch, gk) ++ } ++ return nil ++} +diff --git a/pkg/storage/watch/storage.go b/pkg/storage/watch/storage.go +deleted file mode 100644 +index f3d7b0b..0000000 +--- a/pkg/storage/watch/storage.go ++++ /dev/null +@@ -1,244 +0,0 @@ +-package watch +- +-import ( +- "io/ioutil" +- +- log "github.com/sirupsen/logrus" +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/serializer" +- "github.com/weaveworks/libgitops/pkg/storage" +- "github.com/weaveworks/libgitops/pkg/storage/watch/update" +- "github.com/weaveworks/libgitops/pkg/util/sync" +- "github.com/weaveworks/libgitops/pkg/util/watcher" +- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +- "k8s.io/apimachinery/pkg/types" +-) +- +-// NewManifestStorage returns a pre-configured GenericWatchStorage backed by a storage.GenericStorage, +-// and a GenericMappedRawStorage for the given manifestDir and Serializer. This should be sufficient +-// for most users that want to watch changes in a directory with manifests. +-func NewManifestStorage(manifestDir string, ser serializer.Serializer) (update.EventStorage, error) { +- return NewGenericWatchStorage( +- storage.NewGenericStorage( +- storage.NewGenericMappedRawStorage(manifestDir), +- ser, +- []runtime.IdentifierFactory{runtime.Metav1NameIdentifier}, +- ), +- ) +-} +- +-// NewGenericWatchStorage is an extended Storage implementation, which provides a watcher +-// for watching changes in the directory managed by the embedded Storage's RawStorage. 
+-// If the RawStorage is a MappedRawStorage instance, it's mappings will automatically +-// be updated by the WatchStorage. Update events are sent to the given event stream. +-// Note: This WatchStorage only works for one-frame files (i.e. only one YAML document +-// per file is supported). +-func NewGenericWatchStorage(s storage.Storage) (update.EventStorage, error) { +- ws := &GenericWatchStorage{ +- Storage: s, +- } +- +- var err error +- var files []string +- if ws.watcher, files, err = watcher.NewFileWatcher(s.RawStorage().WatchDir()); err != nil { +- return nil, err +- } +- +- ws.monitor = sync.RunMonitor(func() { +- ws.monitorFunc(ws.RawStorage(), files) // Offload the file registration to the goroutine +- }) +- +- return ws, nil +-} +- +-// EventDeleteObjectName is used as the name of an object sent to the +-// GenericWatchStorage's event stream when the the object has been deleted +-const EventDeleteObjectName = "" +- +-// GenericWatchStorage implements the WatchStorage interface +-type GenericWatchStorage struct { +- storage.Storage +- watcher *watcher.FileWatcher +- events update.UpdateStream +- monitor *sync.Monitor +-} +- +-var _ update.EventStorage = &GenericWatchStorage{} +- +-// Suspend modify events during Create +-func (s *GenericWatchStorage) Create(obj runtime.Object) error { +- s.watcher.Suspend(watcher.FileEventModify) +- return s.Storage.Create(obj) +-} +- +-// Suspend modify events during Update +-func (s *GenericWatchStorage) Update(obj runtime.Object) error { +- s.watcher.Suspend(watcher.FileEventModify) +- return s.Storage.Update(obj) +-} +- +-// Suspend modify events during Patch +-func (s *GenericWatchStorage) Patch(key storage.ObjectKey, patch []byte) error { +- s.watcher.Suspend(watcher.FileEventModify) +- return s.Storage.Patch(key, patch) +-} +- +-// Suspend delete events during Delete +-func (s *GenericWatchStorage) Delete(key storage.ObjectKey) error { +- s.watcher.Suspend(watcher.FileEventDelete) +- return s.Storage.Delete(key) 
+-} +- +-func (s *GenericWatchStorage) SetUpdateStream(eventStream update.UpdateStream) { +- s.events = eventStream +-} +- +-func (s *GenericWatchStorage) Close() error { +- s.watcher.Close() +- s.monitor.Wait() +- return nil +-} +- +-func (s *GenericWatchStorage) monitorFunc(raw storage.RawStorage, files []string) { +- log.Debug("GenericWatchStorage: Monitoring thread started") +- defer log.Debug("GenericWatchStorage: Monitoring thread stopped") +- var content []byte +- +- // Send a MODIFY event for all files (and fill the mappings +- // of the MappedRawStorage) before starting to monitor changes +- for _, file := range files { +- content, err := ioutil.ReadFile(file) +- if err != nil { +- log.Warnf("Ignoring %q: %v", file, err) +- continue +- } +- +- obj, err := runtime.NewPartialObject(content) +- if err != nil { +- log.Warnf("Ignoring %q: %v", file, err) +- continue +- } +- +- // Add a mapping between this object and path +- s.addMapping(raw, obj, file) +- // Send the event to the events channel +- s.sendEvent(update.ObjectEventModify, obj) +- } +- +- for { +- if event, ok := <-s.watcher.GetFileUpdateStream(); ok { +- var partObj runtime.PartialObject +- var err error +- +- var objectEvent update.ObjectEvent +- switch event.Event { +- case watcher.FileEventModify: +- objectEvent = update.ObjectEventModify +- case watcher.FileEventDelete: +- objectEvent = update.ObjectEventDelete +- } +- +- log.Tracef("GenericWatchStorage: Processing event: %s", event.Event) +- if event.Event == watcher.FileEventDelete { +- key, err := raw.GetKey(event.Path) +- if err != nil { +- log.Warnf("Failed to retrieve data for %q: %v", event.Path, err) +- continue +- } +- +- // This creates a "fake" Object from the key to be used for +- // deletion, as the original has already been removed from disk +- apiVersion, kind := key.GetGVK().ToAPIVersionAndKind() +- partObj = &runtime.PartialObjectImpl{ +- TypeMeta: metav1.TypeMeta{ +- APIVersion: apiVersion, +- Kind: kind, +- }, +- ObjectMeta: 
metav1.ObjectMeta{ +- Name: EventDeleteObjectName, +- // TODO: This doesn't take into account where e.g. the identifier is "{namespace}/{name}" +- UID: types.UID(key.GetIdentifier()), +- }, +- } +- // remove the mapping for this key as it's now deleted +- s.removeMapping(raw, key) +- } else { +- content, err = ioutil.ReadFile(event.Path) +- if err != nil { +- log.Warnf("Ignoring %q: %v", event.Path, err) +- continue +- } +- +- if partObj, err = runtime.NewPartialObject(content); err != nil { +- log.Warnf("Ignoring %q: %v", event.Path, err) +- continue +- } +- +- if event.Event == watcher.FileEventMove { +- // Update the mappings for the moved file (AddMapping overwrites) +- s.addMapping(raw, partObj, event.Path) +- +- // Internal move events are a no-op +- continue +- } +- +- // This is based on the key's existence instead of watcher.EventCreate, +- // as Objects can get updated (via watcher.FileEventModify) to be conformant +- if _, err = raw.GetKey(event.Path); err != nil { +- // Add a mapping between this object and path +- s.addMapping(raw, partObj, event.Path) +- +- // This is what actually determines if an Object is created, +- // so update the event to update.ObjectEventCreate here +- objectEvent = update.ObjectEventCreate +- } +- } +- +- // Send the objectEvent to the events channel +- if objectEvent != update.ObjectEventNone { +- s.sendEvent(objectEvent, partObj) +- } +- } else { +- return +- } +- } +-} +- +-func (s *GenericWatchStorage) sendEvent(event update.ObjectEvent, partObj runtime.PartialObject) { +- if s.events != nil { +- log.Tracef("GenericWatchStorage: Sending event: %v", event) +- s.events <- update.Update{ +- Event: event, +- PartialObject: partObj, +- Storage: s, +- } +- } +-} +- +-// addMapping registers a mapping between the given object and the specified path, if raw is a +-// MappedRawStorage. 
If a given mapping already exists between this object and some path, it +-// will be overridden with the specified new path +-func (s *GenericWatchStorage) addMapping(raw storage.RawStorage, obj runtime.Object, file string) { +- mapped, ok := raw.(storage.MappedRawStorage) +- if !ok { +- return +- } +- +- // Let the embedded storage decide using its identifiers how to +- key, err := s.Storage.ObjectKeyFor(obj) +- if err != nil { +- log.Errorf("couldn't get object key for: gvk=%s, uid=%s, name=%s", obj.GetObjectKind().GroupVersionKind(), obj.GetUID(), obj.GetName()) +- } +- +- mapped.AddMapping(key, file) +-} +- +-// removeMapping removes a mapping a file that doesn't exist +-func (s *GenericWatchStorage) removeMapping(raw storage.RawStorage, key storage.ObjectKey) { +- mapped, ok := raw.(storage.MappedRawStorage) +- if !ok { +- return +- } +- +- mapped.RemoveMapping(key) +-} +diff --git a/pkg/storage/watch/update/event.go b/pkg/storage/watch/update/event.go +deleted file mode 100644 +index 57367b7..0000000 +--- a/pkg/storage/watch/update/event.go ++++ /dev/null +@@ -1,31 +0,0 @@ +-package update +- +-import "fmt" +- +-// ObjectEvent is an enum describing a change in an Object's state. 
+-type ObjectEvent byte +- +-var _ fmt.Stringer = ObjectEvent(0) +- +-const ( +- ObjectEventNone ObjectEvent = iota // 0 +- ObjectEventCreate // 1 +- ObjectEventModify // 2 +- ObjectEventDelete // 3 +-) +- +-func (o ObjectEvent) String() string { +- switch o { +- case 0: +- return "NONE" +- case 1: +- return "CREATE" +- case 2: +- return "MODIFY" +- case 3: +- return "DELETE" +- } +- +- // Should never happen +- return "UNKNOWN" +-} +diff --git a/pkg/storage/watch/update/update.go b/pkg/storage/watch/update/update.go +deleted file mode 100644 +index 05ea7e0..0000000 +--- a/pkg/storage/watch/update/update.go ++++ /dev/null +@@ -1,28 +0,0 @@ +-package update +- +-import ( +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/storage" +-) +- +-// Update bundles an FileEvent with an +-// APIType for Storage retrieval. +-type Update struct { +- Event ObjectEvent +- PartialObject runtime.PartialObject +- Storage storage.Storage +-} +- +-// UpdateStream is a channel of updates. +-type UpdateStream chan Update +- +-// EventStorage is a storage that exposes an UpdateStream. +-type EventStorage interface { +- storage.Storage +- +- // SetUpdateStream gives the EventStorage a channel to send events to. +- // The caller is responsible for choosing a large enough buffer to avoid +- // blocking the underlying EventStorage implementation unnecessarily. +- // TODO: In the future maybe enable sending events to multiple listeners? 
+- SetUpdateStream(UpdateStream) +-} +diff --git a/pkg/util/fs.go b/pkg/util/fs.go +deleted file mode 100644 +index 3e1f7d4..0000000 +--- a/pkg/util/fs.go ++++ /dev/null +@@ -1,23 +0,0 @@ +-package util +- +-import ( +- "os" +-) +- +-func PathExists(path string) (bool, os.FileInfo) { +- info, err := os.Stat(path) +- if os.IsNotExist(err) { +- return false, nil +- } +- +- return true, info +-} +- +-func FileExists(filename string) bool { +- exists, info := PathExists(filename) +- if !exists { +- return false +- } +- +- return !info.IsDir() +-} +diff --git a/pkg/util/patch/patch.go b/pkg/util/patch/patch.go +index 11c29ea..535be55 100644 +--- a/pkg/util/patch/patch.go ++++ b/pkg/util/patch/patch.go +@@ -1,103 +1,88 @@ + package patch + + import ( +- "bytes" ++ "encoding/json" + "fmt" +- "io/ioutil" + +- "github.com/weaveworks/libgitops/pkg/runtime" +- "github.com/weaveworks/libgitops/pkg/serializer" +- "k8s.io/apimachinery/pkg/runtime/schema" ++ jsonbytepatcher "github.com/evanphx/json-patch" ++ "k8s.io/apimachinery/pkg/api/errors" ++ "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/strategicpatch" + ) + +-type Patcher interface { +- Create(new runtime.Object, applyFn func(runtime.Object) error) ([]byte, error) +- Apply(original, patch []byte, gvk schema.GroupVersionKind) ([]byte, error) +- ApplyOnFile(filePath string, patch []byte, gvk schema.GroupVersionKind) error ++// BytePatcherForType returns the right BytePatcher for the given ++// patch type. ++// ++// Note: if patchType is unknown, the return value will be nil, so make ++// sure you check the BytePatcher is non-nil before using it! 
++func BytePatcherForType(patchType types.PatchType) BytePatcher { ++ switch patchType { ++ case types.JSONPatchType: ++ return JSONBytePatcher{} ++ case types.MergePatchType: ++ return MergeBytePatcher{} ++ case types.StrategicMergePatchType: ++ return StrategicMergeBytePatcher{} ++ default: ++ return nil ++ } + } + +-func NewPatcher(s serializer.Serializer) Patcher { +- return &patcher{serializer: s} +-} ++// maximum number of operations a single json patch may contain. ++const maxJSONBytePatcherOperations = 10000 + +-type patcher struct { +- serializer serializer.Serializer ++type BytePatcher interface { ++ // TODO: SupportedType() types.PatchType ++ // currentData must be versioned bytes of the same GVK as into and patch.Data() (if merge patch) ++ // into must be an empty object ++ Apply(currentJSON, patchJSON []byte, schema strategicpatch.LookupPatchMeta) ([]byte, error) + } + +-// Create is a helper that creates a patch out of the change made in applyFn +-func (p *patcher) Create(new runtime.Object, applyFn func(runtime.Object) error) (patchBytes []byte, err error) { +- var oldBytes, newBytes bytes.Buffer +- encoder := p.serializer.Encoder() +- old := new.DeepCopyObject().(runtime.Object) +- +- if err = encoder.Encode(serializer.NewJSONFrameWriter(&oldBytes), old); err != nil { +- return +- } +- +- if err = applyFn(new); err != nil { +- return +- } +- +- if err = encoder.Encode(serializer.NewJSONFrameWriter(&newBytes), new); err != nil { +- return ++type JSONBytePatcher struct{} ++ ++func (JSONBytePatcher) Apply(currentJSON, patchJSON []byte, _ strategicpatch.LookupPatchMeta) ([]byte, error) { ++ // sanity check potentially abusive patches ++ // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) ++ // TODO(luxas): Go v1.15 has the above mentioned patch, what needs changing now? 
++ if len(patchJSON) > 1024*1024 { ++ v := []interface{}{} ++ if err := json.Unmarshal(patchJSON, &v); err != nil { ++ return nil, fmt.Errorf("error decoding patch: %v", err) ++ } + } + +- emptyObj, err := p.serializer.Scheme().New(old.GetObjectKind().GroupVersionKind()) +- if err != nil { +- return +- } +- +- patchBytes, err = strategicpatch.CreateTwoWayMergePatch(oldBytes.Bytes(), newBytes.Bytes(), emptyObj) +- if err != nil { +- return nil, fmt.Errorf("CreateTwoWayMergePatch failed: %v", err) +- } +- +- return patchBytes, nil +-} +- +-func (p *patcher) Apply(original, patch []byte, gvk schema.GroupVersionKind) ([]byte, error) { +- emptyObj, err := p.serializer.Scheme().New(gvk) ++ patchObj, err := jsonbytepatcher.DecodePatch(patchJSON) + if err != nil { + return nil, err + } +- +- b, err := strategicpatch.StrategicMergePatch(original, patch, emptyObj) +- if err != nil { +- return nil, err ++ if len(patchObj) > maxJSONBytePatcherOperations { ++ return nil, errors.NewRequestEntityTooLargeError( ++ fmt.Sprintf("The allowed maximum operations in a JSON patch is %d, got %d", ++ maxJSONBytePatcherOperations, len(patchObj))) + } +- +- return p.serializerEncode(b) ++ return patchObj.Apply(currentJSON) + } + +-func (p *patcher) ApplyOnFile(filePath string, patch []byte, gvk schema.GroupVersionKind) error { +- oldContent, err := ioutil.ReadFile(filePath) +- if err != nil { +- return err +- } +- +- newContent, err := p.Apply(oldContent, patch, gvk) +- if err != nil { +- return err ++type MergeBytePatcher struct{} ++ ++func (MergeBytePatcher) Apply(currentJSON, patchJSON []byte, _ strategicpatch.LookupPatchMeta) ([]byte, error) { ++ // sanity check potentially abusive patches ++ // TODO(liggitt): drop this once golang json parser limits stack depth (https://github.com/golang/go/issues/31789) ++ // TODO(luxas): Go v1.15 has the above mentioned patch, what needs changing now? 
++ if len(patchJSON) > 1024*1024 { ++ v := map[string]interface{}{} ++ if err := json.Unmarshal(patchJSON, &v); err != nil { ++ return nil, errors.NewBadRequest(fmt.Sprintf("error decoding patch: %v", err)) ++ } + } + +- return ioutil.WriteFile(filePath, newContent, 0644) ++ return jsonbytepatcher.MergePatch(currentJSON, patchJSON) + } + +-// StrategicMergePatch returns an unindented, unorganized JSON byte slice, +-// this helper takes that as an input and returns the same JSON re-encoded +-// with the serializer so it conforms to a runtime.Object +-// TODO: Just use encoding/json.Indent here instead? +-func (p *patcher) serializerEncode(input []byte) ([]byte, error) { +- obj, err := p.serializer.Decoder().Decode(serializer.NewJSONFrameReader(serializer.FromBytes(input))) +- if err != nil { +- return nil, err +- } +- +- var result bytes.Buffer +- if err := p.serializer.Encoder().Encode(serializer.NewJSONFrameWriter(&result), obj); err != nil { +- return nil, err +- } ++type StrategicMergeBytePatcher struct{} + +- return result.Bytes(), err ++func (StrategicMergeBytePatcher) Apply(currentJSON, patchJSON []byte, schema strategicpatch.LookupPatchMeta) ([]byte, error) { ++ // TODO: Also check for overflow here? ++ // TODO: What to do when schema is nil? error? ++ return strategicpatch.StrategicMergePatchUsingLookupPatchMeta(currentJSON, patchJSON, schema) + } +diff --git a/pkg/util/patch/patch_test.go b/pkg/util/patch/patch_test.go +index 9a3cf54..c9d1b01 100644 +--- a/pkg/util/patch/patch_test.go ++++ b/pkg/util/patch/patch_test.go +@@ -1,5 +1,9 @@ + package patch + ++/* ++ ++TODO: Create good unit tests for this package! 
++ + import ( + "bytes" + "testing" +@@ -58,3 +62,4 @@ func TestApplyPatch(t *testing.T) { + t.Fatal(err) + } + } ++*/ +diff --git a/pkg/util/sync/monitor.go b/pkg/util/sync/monitor.go +index f09c55c..111a294 100644 +--- a/pkg/util/sync/monitor.go ++++ b/pkg/util/sync/monitor.go +@@ -1,31 +1,39 @@ + package sync + +-import "sync" ++import ( ++ "errors" ++ "sync" ++) + + // Monitor is a convenience wrapper around + // starting a goroutine with a wait group, + // which can be used to wait for the + // goroutine to stop. + type Monitor struct { +- wg *sync.WaitGroup ++ wg *sync.WaitGroup ++ err error + } + +-func RunMonitor(f func()) (m *Monitor) { +- m = &Monitor{ ++func RunMonitor(f func() error) *Monitor { ++ m := &Monitor{ + wg: new(sync.WaitGroup), + } + + m.wg.Add(1) + go func() { +- f() ++ m.err = f() + m.wg.Done() + }() + +- return ++ return m + } + +-func (m *Monitor) Wait() { +- if m != nil { +- m.wg.Wait() ++func (m *Monitor) Wait() error { ++ // TODO: Do we need this check? ++ if m == nil { ++ return errors.New("Monitor: invalid null pointer to m") + } ++ // TODO: maybe this could be easier implemented using just a channel? ++ m.wg.Wait() ++ return m.err + } +diff --git a/pkg/util/util.go b/pkg/util/util.go +deleted file mode 100644 +index c80159c..0000000 +--- a/pkg/util/util.go ++++ /dev/null +@@ -1,54 +0,0 @@ +-package util +- +-import ( +- "bytes" +- "crypto/rand" +- "encoding/hex" +- "fmt" +- "os/exec" +- "strings" +-) +- +-func ExecuteCommand(command string, args ...string) (string, error) { +- cmd := exec.Command(command, args...) 
+- out, err := cmd.CombinedOutput() +- if err != nil { +- return "", fmt.Errorf("command %q exited with %q: %v", cmd.Args, out, err) +- } +- +- return string(bytes.TrimSpace(out)), nil +-} +- +-func MatchPrefix(prefix string, fields ...string) ([]string, bool) { +- var prefixMatches, exactMatches []string +- +- for _, str := range fields { +- if str == prefix { +- exactMatches = append(exactMatches, str) +- } else if strings.HasPrefix(str, prefix) { +- prefixMatches = append(prefixMatches, str) +- } +- } +- +- // If we have exact matches, return them +- // and set the exact match boolean +- if len(exactMatches) > 0 { +- return exactMatches, true +- } +- +- return prefixMatches, false +-} +- +-func BoolPtr(b bool) *bool { +- return &b +-} +- +-// RandomSHA returns a hex-encoded string from {byteLen} random bytes. +-func RandomSHA(byteLen int) (string, error) { +- b := make([]byte, byteLen) +- _, err := rand.Read(b) +- if err != nil { +- return "", err +- } +- return hex.EncodeToString(b), nil +-} +diff --git a/pkg/util/watcher/dir_traversal.go b/pkg/util/watcher/dir_traversal.go +deleted file mode 100644 +index 739ecf7..0000000 +--- a/pkg/util/watcher/dir_traversal.go ++++ /dev/null +@@ -1,60 +0,0 @@ +-package watcher +- +-import ( +- "os" +- "path/filepath" +- "strings" +-) +- +-func (w *FileWatcher) getFiles() ([]string, error) { +- return WalkDirectoryForFiles(w.dir, w.opts.ValidExtensions, w.opts.ExcludeDirs) +-} +- +-func (w *FileWatcher) validFile(path string) bool { +- return isValidFile(path, w.opts.ValidExtensions, w.opts.ExcludeDirs) +-} +- +-// WalkDirectoryForFiles discovers all subdirectories and +-// returns a list of valid files in them +-func WalkDirectoryForFiles(dir string, validExts, excludeDirs []string) (files []string, err error) { +- err = filepath.Walk(dir, +- func(path string, info os.FileInfo, err error) error { +- if err != nil { +- return err +- } +- +- if !info.IsDir() { +- // Only include valid files +- if isValidFile(path, validExts, 
excludeDirs) { +- files = append(files, path) +- } +- } +- +- return nil +- }) +- +- return +-} +- +-// isValidFile is used to filter out all unsupported +-// files based on if their extension is unknown or +-// if their path contains an excluded directory +-func isValidFile(path string, validExts, excludeDirs []string) bool { +- parts := strings.Split(filepath.Clean(path), string(os.PathSeparator)) +- ext := filepath.Ext(parts[len(parts)-1]) +- for _, suffix := range validExts { +- if ext == suffix { +- return true +- } +- } +- +- for i := 0; i < len(parts)-1; i++ { +- for _, exclude := range excludeDirs { +- if parts[i] == exclude { +- return false +- } +- } +- } +- +- return false +-} +diff --git a/pkg/util/watcher/event.go b/pkg/util/watcher/event.go +deleted file mode 100644 +index 4da933d..0000000 +--- a/pkg/util/watcher/event.go ++++ /dev/null +@@ -1,64 +0,0 @@ +-package watcher +- +-import ( +- "fmt" +- "strings" +-) +- +-// FileEvent is an enum describing a change in a file's state +-type FileEvent byte +- +-const ( +- FileEventNone FileEvent = iota // 0 +- FileEventModify // 1 +- FileEventDelete // 2 +- FileEventMove // 3 +-) +- +-func (e FileEvent) String() string { +- switch e { +- case 0: +- return "NONE" +- case 1: +- return "MODIFY" +- case 2: +- return "DELETE" +- case 3: +- return "MOVE" +- } +- +- return "UNKNOWN" +-} +- +-// FileEvents is a slice of FileEvents +-type FileEvents []FileEvent +- +-var _ fmt.Stringer = FileEvents{} +- +-func (e FileEvents) String() string { +- strs := make([]string, 0, len(e)) +- for _, ev := range e { +- strs = append(strs, ev.String()) +- } +- +- return strings.Join(strs, ",") +-} +- +-func (e FileEvents) Bytes() []byte { +- b := make([]byte, 0, len(e)) +- for _, event := range e { +- b = append(b, byte(event)) +- } +- +- return b +-} +- +-// FileUpdates is a slice of FileUpdate pointers +-type FileUpdates []*FileUpdate +- +-// FileUpdate is used by watchers to +-// signal the state change of a file. 
+-type FileUpdate struct { +- Event FileEvent +- Path string +-} diff --git a/go.mod b/go.mod index 3f4629c7..c1a3f249 100644 --- a/go.mod +++ b/go.mod @@ -5,26 +5,34 @@ go 1.15 replace github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible require ( - github.com/evanphx/json-patch v4.9.0+incompatible - github.com/fluxcd/go-git-providers v0.0.3 - github.com/fluxcd/pkg/ssh v0.0.5 - github.com/go-git/go-git/v5 v5.2.0 - github.com/go-openapi/spec v0.20.0 - github.com/google/btree v1.0.0 + github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect + github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 // indirect + github.com/evanphx/json-patch v4.11.0+incompatible + github.com/fluxcd/go-git-providers v0.2.0 + github.com/fluxcd/pkg/ssh v0.2.0 + github.com/go-git/go-git/v5 v5.4.2 + github.com/go-openapi/spec v0.20.3 + github.com/go-openapi/strfmt v0.19.5 // indirect + github.com/go-openapi/validate v0.19.8 // indirect + github.com/google/btree v1.0.1 github.com/google/go-github/v32 v32.1.0 github.com/labstack/echo v3.3.10+incompatible github.com/labstack/gommon v0.3.0 // indirect - github.com/mattn/go-isatty v0.0.12 // indirect + github.com/markbates/pkger v0.17.1 // indirect + github.com/mattn/go-isatty v0.0.13 // indirect github.com/mitchellh/go-homedir v1.1.0 + github.com/qri-io/starlib v0.4.2-0.20200213133954-ff2e8cd5ef8d // indirect github.com/rjeczalik/notify v0.9.2 - github.com/sirupsen/logrus v1.7.0 - github.com/spf13/afero v1.2.2 + github.com/sirupsen/logrus v1.8.1 + github.com/spf13/afero v1.6.0 github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.6.1 - golang.org/x/sys v0.0.0-20210108172913-0df2131ae363 - k8s.io/apimachinery v0.19.6 - k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 - k8s.io/utils v0.0.0-20200912215256-4140de9c8800 - sigs.k8s.io/controller-runtime v0.7.0 - sigs.k8s.io/kustomize/kyaml v0.10.5 + github.com/stretchr/testify v1.7.0 + go.uber.org/tools 
v0.0.0-20190618225709-2cfd321de3ee // indirect + golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 + gotest.tools v2.2.0+incompatible // indirect + k8s.io/apimachinery v0.21.2 + k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d + k8s.io/utils v0.0.0-20210527160623-6fdb442a123b + sigs.k8s.io/controller-runtime v0.9.1 + sigs.k8s.io/kustomize/kyaml v0.10.21 ) diff --git a/go.sum b/go.sum index e8f2095e..d60d6a0e 100644 --- a/go.sum +++ b/go.sum @@ -5,40 +5,65 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod 
h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/360EntSecGroup-Skylar/excelize v1.4.1/go.mod h1:vnax29X2usfl7HHkBrX5EvSCJcmH3dT9luvxzu8iGAE= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod 
h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= +github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg= -github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod 
h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2 h1:JCHLVE3B+kJde7bIEo5N4J+ZbLhp0J1Fs+ulyRws4gE= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= +github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= @@ -46,11 +71,15 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/anmitsu/go-shlex 
v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= @@ -60,7 +89,9 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -73,6 +104,7 @@ 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -83,6 +115,7 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -96,7 +129,6 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustmop/soup v1.1.2-0.20190516214245-38228baa104e/go.mod h1:CgNC6SGbT+Xb8wGGvzilttZL1mc5sQ/5KkcxsZttMIk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 
h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -104,18 +136,24 @@ github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fluxcd/go-git-providers v0.0.3 h1:pquQvTpd1a4V1efPyZWuVPeIKrTgV8QRoDY0VGH+qiw= github.com/fluxcd/go-git-providers v0.0.3/go.mod h1:iaXf3nEq8MB/LzxfbNcCl48sAtIReUU7jqjJ7CEnfFQ= +github.com/fluxcd/go-git-providers v0.2.0 h1:2dxT4r9UDjKwsNFmO9wcSR2FUqKyvsDwha5b/zvK1Ko= +github.com/fluxcd/go-git-providers v0.2.0/go.mod h1:nRgNpHZmZhrsyNSma1JcAhjUG9xrqMGJcIUr9K7M7vk= github.com/fluxcd/pkg/ssh 
v0.0.5 h1:rnbFZ7voy2JBlUfMbfyqArX2FYaLNpDhccGFC3qW83A= github.com/fluxcd/pkg/ssh v0.0.5/go.mod h1:7jXPdXZpc0ttMNz2kD9QuMi3RNn/e0DOFbj0Tij/+Hs= +github.com/fluxcd/pkg/ssh v0.2.0 h1:e9V+HReOL7czm7edVzYS1e+CnFKz1/kHiUNfLRpBdH8= +github.com/fluxcd/pkg/ssh v0.2.0/go.mod h1:EpQC7Ztdlbi8S/dlYXqVDZtHtLpN3FNl3N6zWujVzbA= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -131,22 +169,34 @@ github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= +github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= 
github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI= github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs= +github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= +github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= +github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= 
+github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -155,23 +205,17 @@ github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2 github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1 h1:wSt/4CYxs70xbATrGXhokKF1i0tZjENLOo1ioIO13zk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= -github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9 h1:tF+augKRWlWx0J0B7ZyyKSiTyV6E1zZe+7b3qQlcEf8= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod 
h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= -github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= @@ -183,58 +227,62 @@ github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCs github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501 h1:C1JKChikHGpXwT5UQDFaryIpDtyyGL/CR6C2kB7F1oc= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= -github.com/go-openapi/spec v0.19.3 h1:0XRyw8kguri6Yw4SxhsQA/atC88yqrk0+G4YhI2wabc= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.5 h1:Xm0Ao53uqnk9QE/LlYV5DEU09UAgpliA85QoT9LzqPw= 
github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.20.0 h1:HGLc8AJ7ynOxwv0Lq4TsnwLsWMawHAYiJIFzbcML86I= github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= +github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= +github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87 h1:zP3nY8Tk2E6RTkqGYrarZXuzh+ffyLDljLxCy1iJw80= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.12 h1:Bc0bnY2c3AoF7Gc+IMIAQQsD8fLHjHpc19wXvYuayQI= github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= +github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= +github.com/go-openapi/swag v0.19.14/go.mod 
h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e 
h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -243,22 +291,29 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf 
v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= 
github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -266,17 +321,25 @@ github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXi github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= 
github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -285,55 +348,77 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.2/go.mod 
h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= 
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -343,39 +428,46 @@ github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8c github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a h1:TpvdAwDAt1K4ANVOfcihouRdvP+MgAfDWwBuct4l6ZY= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson 
v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9 h1:d5US/mDsogSGW37IV293h//ZFaeajb69h+EHFsv2xGg= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= +github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term 
v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -387,47 +479,52 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/onsi/ginkgo 
v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/paulmach/orb v0.1.3/go.mod h1:VFlX/8C+IQ1p6FTRRKzKoOPJnvEtA5G0Veuqwbu//Vk= github.com/pborman/uuid v1.2.0/go.mod 
h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model 
v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -438,11 +535,16 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/qri-io/starlib v0.4.2-0.20200213133954-ff2e8cd5ef8d/go.mod h1:7DPO4domFU579Ga6E61sB9VFNaniPVwJP5C4bBCu3wA= github.com/rjeczalik/notify v0.9.2 h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8= @@ -450,25 +552,33 @@ github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa github.com/rogpeppe/fastuuid 
v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux 
v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -476,6 +586,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -483,13 +594,14 @@ github.com/stretchr/objx v0.2.0 
h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.3-0.20181224173747-660f15d67dbb/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= @@ -501,62 +613,78 @@ github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8W github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/xanzy/go-gitlab v0.33.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= +github.com/xanzy/go-gitlab v0.43.0/go.mod 
h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= +github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
go.starlark.net v0.0.0-20190528202925-30ae18b8564f/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073 h1:xMPOj6Pz6UipU1wXLkrtqpHbR0AVFnyPEQq/wRWz9lM= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod 
h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b h1:7mWr3k41Qtv8XlltBkDkl8LoP3mpSgBW8BUoxtEdbXg= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -568,67 +696,86 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= 
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
-golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -637,17 +784,18 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f h1:25KHgbfyiSm6vwQLbM3zZIe1v9p/3ea4Rz+nnM5K/i4= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -655,34 +803,58 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210108172913-0df2131ae363 h1:wHn06sgWHMO1VsQ8F+KzDJx/JzqfsNLnc+oEi07qD7s= golang.org/x/sys v0.0.0-20210108172913-0df2131ae363/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= +golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text 
v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 
h1:Vv0JUPWTyeqUq42B2WJ1FeIDjjvGKoA2Ss+Ts0lAVbs= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -693,11 +865,11 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -709,32 +881,59 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn 
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api 
v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -743,8 
+942,19 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc 
v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -752,31 +962,34 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= 
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -786,61 +999,92 @@ gopkg.in/warnings.v0 v0.1.2 
h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod 
h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= +k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y= +k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA= k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= +k8s.io/apiextensions-apiserver v0.21.2 h1:+exKMRep4pDrphEafRvpEi79wTnCFMqKf8LBtlA3yrE= +k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.6 h1:kBLzSGuDdY1NdSV2uFzI+FwZ9wtkmG+X3ZVcWXSqNgA= k8s.io/apimachinery v0.19.6/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= +k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc= +k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= +k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= k8s.io/client-go v0.19.2 
h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc= k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= +k8s.io/client-go v0.21.2 h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0= +k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= +k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs= k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= +k8s.io/component-base v0.21.2 h1:EsnmFFoJ86cEywC0DoIkAUiEV6fjgauNugiw1lmIjs4= +k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= +k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d 
h1:lUK8GPtuJy8ClWZhuvKoaLdKGPLq9H1PxWp7VPBZBkU= +k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/controller-runtime v0.7.0 h1:bU20IBBEPccWz5+zXpLnpVsgBYxqclaHu1pVDl/gEt8= sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= +sigs.k8s.io/controller-runtime v0.9.1 h1:+LAqHAhkVW4lt/jLlrKmnGPA7OORMw/xEUH3Ey1h1Bs= +sigs.k8s.io/controller-runtime v0.9.1/go.mod h1:cTqsgnwSOsYS03XwySYZj8k6vf0+eC4FJRcCgQ9elb4= sigs.k8s.io/kustomize/kyaml v0.10.5 h1:PbJcsZsEM7O3hHtUWTR+4WkHVbQRW9crSy75or1gRbI= sigs.k8s.io/kustomize/kyaml v0.10.5/go.mod h1:P6Oy/ah/GZMKzJMIJA2a3/bc8YrBkuL5kJji13PSIzY= +sigs.k8s.io/kustomize/kyaml v0.10.21 h1:KdoEgz3HzmcaLUTFqs6aaqFpsaA9MVRIwOZbi8vMaD0= +sigs.k8s.io/kustomize/kyaml v0.10.21/go.mod h1:TYWhGwW9vjoRh3rWqBwB/ZOXyEGRVWe7Ggc3+KZIO+c= sigs.k8s.io/structured-merge-diff/v4 
v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index 4d786547..bd29db97 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -2,119 +2,113 @@ package transactional import ( "context" + "errors" "fmt" "strings" - "sync" "sync/atomic" "github.com/sirupsen/logrus" "github.com/weaveworks/libgitops/pkg/storage/backend" "github.com/weaveworks/libgitops/pkg/storage/client" "github.com/weaveworks/libgitops/pkg/storage/core" + syncutil "github.com/weaveworks/libgitops/pkg/util/sync" utilerrs "k8s.io/apimachinery/pkg/util/errors" ) var _ Client = &Generic{} -func NewGeneric(c client.Client, manager BranchManager, merger BranchMerger) (Client, error) { +func NewGeneric(c client.Client, manager TransactionManager) (Client, error) { if c == nil { return nil, fmt.Errorf("%w: c is required", core.ErrInvalidParameter) } if manager == nil { return nil, fmt.Errorf("%w: manager is required", core.ErrInvalidParameter) } - return &Generic{ + g := &Generic{ c: c, - txs: make(map[string]*txLock), - txsMu: &sync.Mutex{}, + lockMap: syncutil.NewNamedLockMap(), txHooks: &MultiTransactionHook{}, commitHooks: &MultiCommitHook{}, manager: manager, - 
merger: merger, - }, nil + //merger: merger, + } + // We must be able to resolve versions + if g.versionRefResolver() == nil { + return nil, fmt.Errorf("%w: the underlying Client must provide a VersionRefResolver through its Storage", core.ErrInvalidParameter) + } + return g, nil } type Generic struct { c client.Client - // txs maps branches to their tx locks - txs map[string]*txLock - // txsMu guards reads and writes of txs - txsMu *sync.Mutex + lockMap syncutil.NamedLockMap // Hooks txHooks TransactionHookChain commitHooks CommitHookChain - // +optional - merger BranchMerger // +required - manager BranchManager + manager TransactionManager } +type txLockKeyImpl struct{} + +var txLockKey = txLockKeyImpl{} + type txLock struct { - // mu is locked for writing while the transaction is executing, and locked - // for reading, while a read operation is active. - mu *sync.RWMutex // mode specifies what transaction mode is used; Atomic or AllowReading. - mode TxMode + //mode TxMode // active == 1 means "transaction active, mu is locked for writing" // active == 0 means "transaction has stopped, mu has been unlocked" active uint32 } func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error { - return c.lockAndReadBranch(ctx, func() error { + return c.lockAndRead(ctx, func() error { return c.c.Get(ctx, key, obj) }) } func (c *Generic) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - return c.lockAndReadBranch(ctx, func() error { + return c.lockAndRead(ctx, func() error { return c.c.List(ctx, list, opts...) }) } -func (c *Generic) getBranchLockInfo(branch string) *txLock { - // c.txsMu guards reads and writes to the c.txs map - c.txsMu.Lock() - defer c.txsMu.Unlock() +func (c *Generic) versionRefResolver() core.VersionRefResolver { + return c.c.BackendReader().Storage().VersionRefResolver() +} - // Check if information about a transaction on this branch exists. 
- txState, ok := c.txs[branch] - if ok { - return txState - } - // if not, grow the txs map by one and return it - c.txs[branch] = &txLock{ - mu: &sync.RWMutex{}, - } - return c.txs[branch] +func (c *Generic) lockForBranch(branch string) (syncutil.LockWithData, *txLock, bool) { + lck := c.lockMap.LockByName(branch) + txState, ok := lck.QLoad(txLockKey).(*txLock) + return lck, txState, ok } -func (c *Generic) lockAndReadBranch(ctx context.Context, callback func() error) error { - // Aquire the branch-specific lock - branch := core.GetVersionRef(ctx).Branch() - txState := c.getBranchLockInfo(branch) - - // In the atomic mode, we lock the txLock during the read, - // so no new transactions can be started while the read - // operation goes on. In non-atomic modes, reads aren't locked, - // instead it is assumed that downstream implementations just - // read the latest commit on the given branch. - if txState.mode == TxModeAtomic { - txState.mu.RLock() - } - err := callback() - if txState.mode == TxModeAtomic { - txState.mu.RUnlock() +func (c *Generic) lockAndRead(ctx context.Context, callback func() error) error { + ref := core.GetVersionRef(ctx) + + _, immutable, err := c.versionRefResolver().ResolveVersionRef(ref) + if err != nil { + return err + } else if immutable { + // If this is an immutable revision, just continue the call + return callback() } - return err + + // At this point we know that ref is mutable (what we call a "branch" here), and commit is the fixed revision + lck := c.lockMap.LockByName(ref) + lck.Lock() + defer lck.Unlock() + // TODO: At what point should we resolve the "branch" -> "commit" part? Should we expect that to be done in the + // filesystem only? 
+ return callback() } func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txFunc) { - // Aquire the branch-specific lock - txState := c.getBranchLockInfo(info.Head) + // Get the head branch lock and status + lck := c.lockMap.LockByName(info.HeadBranch) // Wait for all reads to complete (in the case of the atomic more), // and then lock for writing. For non-atomic mode this uses the mutex @@ -125,9 +119,12 @@ func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txF // regardless of mode. If atomic mode is enabled, this also waits // on any reads happening at this moment. For all modes, this ensures // transactions happen in order. - txState.mu.Lock() - txState.active = 1 // set tx state to "active" - txState.mode = info.Options.Mode // declare what transaction mode is used + lck.Lock() + txState := &txLock{ + active: 1, // set tx state to "active" + //mode: info.Options.Mode, // declare what transaction mode is used + } + lck.Store(txLockKey, txState) // Create a child context with a timeout dlCtx, cleanupTimeout := context.WithTimeout(ctx, info.Options.Timeout) @@ -136,10 +133,10 @@ func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txF cleanupFunc := func() error { // Cleanup after the transaction if err := c.cleanupAfterTx(ctx, &info); err != nil { - return fmt.Errorf("Failed to cleanup branch %s after tx: %v", info.Head, err) + return fmt.Errorf("Failed to cleanup branch %s after tx: %v", info.HeadBranch, err) } // Unlock the mutex so new transactions can take place on this branch - txState.mu.Unlock() + lck.Unlock() return nil } @@ -163,7 +160,7 @@ func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txF // If the parent context was cancelled or timed out; this // function and the above function race to set active => 0 // Regardless, due to the atomic nature of the operation, - // cleanupFunc() will only be run twice. + // cleanupFunc() will only be run once. 
if atomic.CompareAndSwapUint32(&txState.active, 1, 0) { // We can now stop the timeout timer cleanupTimeout() @@ -179,7 +176,8 @@ func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txF func (c *Generic) cleanupAfterTx(ctx context.Context, info *TxInfo) error { // Always both clean the branch, and run post-tx tasks return utilerrs.NewAggregate([]error{ - c.manager.ResetToCleanBranch(ctx, info.Base), + // TODO: This should be "clean up the writable area" + c.manager.ResetToCleanVersion(ctx, info.Base), // TODO: should this be in its own goroutine to switch back to main // ASAP? c.TransactionHookChain().PostTransactionHook(ctx, *info), @@ -190,11 +188,11 @@ func (c *Generic) BackendReader() backend.Reader { return c.c.BackendReader() } -func (c *Generic) BranchMerger() BranchMerger { +/*func (c *Generic) BranchMerger() BranchMerger { return c.merger -} +}*/ -func (c *Generic) BranchManager() BranchManager { +func (c *Generic) TransactionManager() TransactionManager { return c.manager } @@ -209,39 +207,55 @@ func (c *Generic) CommitHookChain() CommitHookChain { func (c *Generic) Transaction(ctx context.Context, opts ...TxOption) Tx { tx, err := c.transaction(ctx, opts...) if err != nil { + // TODO: Return a Tx with an error included panic(err) } return tx } -func (c *Generic) BranchTransaction(ctx context.Context, headBranch string, opts ...TxOption) BranchTx { +func (c *Generic) BranchTransaction(ctx context.Context, headBranch string, opts ...TxOption) Tx { tx, err := c.branchTransaction(ctx, headBranch, opts...) if err != nil { + // TODO: Return a Tx with an error included panic(err) } return tx } +var ErrVersionRefIsImmutable = errors.New("cannot execute transaction against immutable version ref") + func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) { + // Rules: A transaction executes against "itself". 
// Parse options o := defaultTxOptions().ApplyOptions(opts) - branch := core.GetVersionRef(ctx).Branch() + ref := core.GetVersionRef(ctx) + + baseCommit, isImmutable, err := c.versionRefResolver().ResolveVersionRef(ref) + if err != nil { + return nil, err + } + // We cannot apply a transaction against an immutable version + if isImmutable { + return nil, fmt.Errorf("%w: %s", ErrVersionRefIsImmutable, ref) + } + info := TxInfo{ - Base: branch, - Head: branch, - Options: *o, + BaseCommit: baseCommit, + HeadBranch: ref, + Options: *o, } // Initialize the transaction ctxWithDeadline, cleanupFunc := c.initTx(ctx, info) // Run pre-tx checks - err := c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info) + if err := c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info); err != nil { + return nil, err + } return &txImpl{ &txCommon{ - err: err, c: c.c, manager: c.manager, commitHook: c.CommitHookChain(), @@ -252,8 +266,9 @@ func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) }, nil } -func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts ...TxOption) (BranchTx, error) { - baseBranch := core.GetVersionRef(ctx).Branch() +func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts ...TxOption) (Tx, error) { + // Get the base version reference. It is ok if it's immutable, too. + baseRef := core.GetVersionRef(ctx) // Append random bytes to the end of the head branch if it ends with a dash if strings.HasSuffix(headBranch, "-") { @@ -265,35 +280,43 @@ func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts } // Validate that the base and head branches are distinct - if baseBranch == headBranch { + if baseRef == headBranch { return nil, fmt.Errorf("head and target branches must not be the same") } - logrus.Debugf("Base branch: %q. Head branch: %q.", baseBranch, headBranch) + logrus.Debugf("Base VersionRef: %q. 
Head branch: %q.", baseRef, headBranch) // Parse options o := defaultTxOptions().ApplyOptions(opts) + // Resolve what the base commit is + baseCommit, _, err := c.versionRefResolver().ResolveVersionRef(baseRef) + if err != nil { + return nil, err + } + info := TxInfo{ - Base: baseBranch, - Head: headBranch, - Options: *o, + BaseCommit: baseCommit, + HeadBranch: headBranch, + Options: *o, } // Register the head branch with the context - ctxWithHeadBranch := core.WithVersionRef(ctx, core.NewBranchRef(headBranch)) + // TODO: We should register all of TxInfo here instead, or ...? + ctxWithHeadBranch := core.WithVersionRef(ctx, headBranch) // Initialize the transaction ctxWithDeadline, cleanupFunc := c.initTx(ctxWithHeadBranch, info) // Run pre-tx checks and create the new branch - err := utilerrs.NewAggregate([]error{ + if err := utilerrs.NewAggregate([]error{ c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info), c.manager.CreateBranch(ctxWithDeadline, headBranch), - }) + }); err != nil { + return nil, err + } - return &txBranchImpl{ + return &txImpl{ txCommon: &txCommon{ - err: err, c: c.c, manager: c.manager, commitHook: c.CommitHookChain(), @@ -301,6 +324,5 @@ func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts info: info, cleanupFunc: cleanupFunc, }, - merger: c.merger, }, nil } diff --git a/pkg/storage/client/transactional/distributed/client.go b/pkg/storage/client/transactional/distributed/client.go index 5a79b274..07817dc8 100644 --- a/pkg/storage/client/transactional/distributed/client.go +++ b/pkg/storage/client/transactional/distributed/client.go @@ -50,14 +50,24 @@ type Generic struct { branchLocksMu *sync.Mutex } +type branchLockKeyImpl struct{} + +var branchLockKey = branchLockKeyImpl{} + type branchLock struct { // mu should be write-locked whenever the branch is actively running any // function from the remote - mu *sync.RWMutex + // mu *sync.RWMutex // lastPull is guarded by mu, before reading, one should RLock 
mu lastPull time.Time } +/* + TxMode.AllowReads is incompatible with the PC/EC distributed mode, and might be with the PC/EL mode. + Ahh, just completely remove the AllowReads mode. If the person wants to do a read for a specific version + while a tx is going on for a branch, they just need to specify the direct commit. +*/ + func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error { return c.readWhenPossible(ctx, func() error { return c.Client.Get(ctx, key, obj) @@ -71,10 +81,14 @@ func (c *Generic) List(ctx context.Context, list client.ObjectList, opts ...clie } func (c *Generic) readWhenPossible(ctx context.Context, operation func() error) error { - branch := c.branchFromCtx(ctx) + ref := core.GetVersionRef(ctx) + // If the versionref is immutable, we can read directly. + if ref.IsImmutable() { + return operation() + } // Check if we need to do a pull before - if c.needsResync(branch, c.opts.CacheValidDuration) { + if c.needsResync(ref, c.opts.CacheValidDuration) { // Try to pull the remote branch. 
If it fails, use returnErr to figure out if // this (depending on the configured PACELC mode) is a critical error, or if we // should continue with the read @@ -88,7 +102,10 @@ func (c *Generic) readWhenPossible(ctx context.Context, operation func() error) return operation() } -func (c *Generic) getBranchLockInfo(branch string) *branchLock { +func (c *Generic) getBranchLockInfo(ref core.VersionRef) *branchLock { + // We "know" this is a "branch", as immutable references are no-ops in readWhenPossible + branch := ref.VersionRef() + c.branchLocksMu.Lock() defer c.branchLocksMu.Unlock() @@ -104,8 +121,8 @@ func (c *Generic) getBranchLockInfo(branch string) *branchLock { return c.branchLocks[branch] } -func (c *Generic) needsResync(branch string, d time.Duration) bool { - lck := c.getBranchLockInfo(branch) +func (c *Generic) needsResync(ref core.VersionRef, d time.Duration) bool { + lck := c.getBranchLockInfo(ref) // Lock while reading the last resync time lck.mu.RLock() defer lck.mu.RUnlock() @@ -162,9 +179,9 @@ func (c *Generic) resyncLoop(ctx context.Context, resyncCacheInterval time.Durat logrus.Info("Exiting the resync loop...") } -func (c *Generic) pull(ctx context.Context, branch string) error { +func (c *Generic) pull(ctx context.Context, ref core.VersionRef) error { // Need to get the branch-specific lock variable - lck := c.getBranchLockInfo(branch) + lck := c.getBranchLockInfo(ref) // Write-lock while this operation is in progress lck.mu.Lock() defer lck.mu.Unlock() @@ -174,7 +191,7 @@ func (c *Generic) pull(ctx context.Context, branch string) error { defer cancel() // Make a ctx for the given branch - ctxForBranch := core.WithVersionRef(pullCtx, core.NewBranchRef(branch)) + ctxForBranch := core.WithMutableVersionRef(pullCtx, branch) if err := c.remote.Pull(ctxForBranch); err != nil { return err } diff --git a/pkg/storage/client/transactional/distributed/git/filesystem.go b/pkg/storage/client/transactional/distributed/git/filesystem.go new file mode 100644 
index 00000000..d05f38e3 --- /dev/null +++ b/pkg/storage/client/transactional/distributed/git/filesystem.go @@ -0,0 +1,66 @@ +package git + +import ( + "context" + "os" + "path/filepath" + + "github.com/go-git/go-git/v5" + "github.com/spf13/afero" +) + +type Filesystem struct { + git *goGit +} + +func (f *Filesystem) RootDirectory() string { + return f.rootDir +} + +func (f *Filesystem) Checksum(_ context.Context, filename string) (string, error) { + // Get the latest commit that is touching this file + ci, err := f.git.repo.Log(&git.LogOptions{ + Order: git.LogOrderCommitterTime, + FileName: &filename, + }) + if err != nil { + return "", err + } + commit, err := ci.Next() + if err != nil { + return "", err + } + return commit.Hash.String(), nil +} + +func (f *Filesystem) MkdirAll(_ context.Context, path string, perm os.FileMode) error { + return f.fs.MkdirAll(path, perm) +} + +func (f *Filesystem) Remove(_ context.Context, name string) error { + return f.fs.Remove(name) +} + +func (f *Filesystem) Stat(_ context.Context, name string) (os.FileInfo, error) { + return f.fs.Stat(name) +} + +func (f *Filesystem) ReadDir(_ context.Context, dirname string) ([]os.FileInfo, error) { + return afero.ReadDir(f.fs, dirname) +} + +func (f *Filesystem) Exists(_ context.Context, path string) (bool, error) { + return afero.Exists(f.fs, path) +} + +func (f *Filesystem) ReadFile(_ context.Context, filename string) ([]byte, error) { + return afero.ReadFile(f.fs, filename) +} + +func (f *Filesystem) WriteFile(_ context.Context, filename string, data []byte, perm os.FileMode) error { + return afero.WriteFile(f.fs, filename, data, perm) +} + +func (f *Filesystem) Walk(_ context.Context, root string, walkFn filepath.WalkFunc) error { + return afero.Walk(f.fs, root, walkFn) +} diff --git a/pkg/storage/client/transactional/distributed/git/git.go b/pkg/storage/client/transactional/distributed/git/git.go index 482e4110..c825634a 100644 --- 
a/pkg/storage/client/transactional/distributed/git/git.go +++ b/pkg/storage/client/transactional/distributed/git/git.go @@ -26,11 +26,11 @@ var ( ErrWorktreeNotClean = errors.New("there are uncommitted changes, cannot create new branch") ) -// LocalClone is an implementation of both a Remote, and a BranchManager, for Git. -var _ transactional.BranchManager = &LocalClone{} +// LocalClone is an implementation of both a Remote, and a TransactionManager, for Git. +var _ transactional.TransactionManager = &LocalClone{} var _ distributed.Remote = &LocalClone{} -// Create a new Remote and BranchManager implementation using Git. The repo is cloned immediately +// Create a new Remote and TransactionManager implementation using Git. The repo is cloned immediately // in the constructor, you can use ctx to enforce a timeout for the clone. func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts ...Option) (*LocalClone, error) { log.Info("Initializing the Git repo...") @@ -67,7 +67,7 @@ func NewLocalClone(ctx context.Context, repoRef gitprovider.RepositoryRef, opts return d, nil } -// LocalClone is an implementation of both a Remote, and a BranchManager, for Git. +// LocalClone is an implementation of both a Remote, and a TransactionManager, for Git. // TODO: Make so that the LocalClone does NOT interfere with any reads or writes by the Client using some shared // mutex. type LocalClone struct { @@ -162,7 +162,7 @@ func (d *LocalClone) CreateBranch(ctx context.Context, branch string) error { d.lock.Lock() defer d.lock.Unlock() - // TODO: Should the caller do a force-reset using ResetToCleanBranch before creating the branch? + // TODO: Should the caller do a force-reset using ResetToCleanVersion before creating the branch? 
// Make sure it's okay to write if err := d.verifyWrite(); err != nil { @@ -180,7 +180,7 @@ func (d *LocalClone) CreateBranch(ctx context.Context, branch string) error { return d.impl.CheckoutBranch(ctx, branch, false, true) } -func (d *LocalClone) ResetToCleanBranch(ctx context.Context, branch string) error { +func (d *LocalClone) ResetToCleanVersion(ctx context.Context, branch string) error { // Lock the mutex now that we're starting, and unlock it when exiting d.lock.Lock() defer d.lock.Unlock() diff --git a/pkg/storage/client/transactional/distributed/git/gogit.go b/pkg/storage/client/transactional/distributed/git/gogit.go index 46dfd16c..01ae34a7 100644 --- a/pkg/storage/client/transactional/distributed/git/gogit.go +++ b/pkg/storage/client/transactional/distributed/git/gogit.go @@ -65,13 +65,13 @@ func (g *goGit) clone(ctx context.Context) error { Tags: git.NoTags, } if g.opts.MainBranch != "" { - cloneOpts.ReferenceName = plumbing.NewBranchReferenceName(g.opts.MainBranch) + cloneOpts.ReferenceName = plumbing.NewMutableVersionReferenceName(g.opts.MainBranch) } log.Infof("Starting to clone the repository %s", g.repoRef) // Do a clone operation to the temporary directory var err error - g.repo, err = git.PlainCloneContext(ctx, g.dir, false, cloneOpts) + g.repo, err = git.PlainCloneContext(ctx, g.dir, true, cloneOpts) // Handle errors if errors.Is(err, context.DeadlineExceeded) { return fmt.Errorf("git clone operation timed out: %w", err) @@ -177,7 +177,7 @@ func (g *goGit) Fetch(ctx context.Context, revision string) error { func (g *goGit) CheckoutBranch(ctx context.Context, branch string, force, create bool) error { return g.wt.Checkout(&git.CheckoutOptions{ - Branch: plumbing.NewBranchReferenceName(branch), + Branch: plumbing.NewMutableVersionReferenceName(branch), Force: true, Create: create, }) @@ -259,7 +259,7 @@ func (g *goGit) CommitAt(_ context.Context, branch string) (rev string, err erro if branch != "" { // Point at HEAD reference, err = 
g.repo.Head() } else { - reference, err = g.repo.Reference(plumbing.NewBranchReferenceName(branch), true) + reference, err = g.repo.Reference(plumbing.NewMutableVersionReferenceName(branch), true) } if err != nil { return diff --git a/pkg/storage/client/transactional/distributed/git/transport.go b/pkg/storage/client/transactional/distributed/git/transport.go index 95866999..3ce8411b 100644 --- a/pkg/storage/client/transactional/distributed/git/transport.go +++ b/pkg/storage/client/transactional/distributed/git/transport.go @@ -17,6 +17,8 @@ type AuthMethod interface { transport.AuthMethod // TransportType defines what transport type should be used with this method TransportType() gitprovider.TransportType + // AuthMethod also implements the option interface + Option } // NewSSHAuthMethod creates a new AuthMethod for the Git SSH protocol, using a given diff --git a/pkg/storage/client/transactional/handlers.go b/pkg/storage/client/transactional/handlers.go index 0b38adea..6d4bb6ba 100644 --- a/pkg/storage/client/transactional/handlers.go +++ b/pkg/storage/client/transactional/handlers.go @@ -1,11 +1,15 @@ package transactional -import "context" +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/storage/core" +) type TxInfo struct { - Base string - Head string - Options TxOptions + BaseCommit core.Commit + HeadBranch string + Options TxOptions } type CommitHookChain interface { @@ -76,7 +80,7 @@ type TransactionHookChain interface { // of the result of the transaction; these will always be run. type TransactionHook interface { // PreTransactionHook executes before CreateBranch has been called for the - // BranchManager in BranchTx mode; and in any case before any user-tx-specific + // TransactionManager in BranchTx mode; and in any case before any user-tx-specific // code starts executing. 
PreTransactionHook(ctx context.Context, info TxInfo) error // PostTransactionHook executes when a transaction is terminated, either due diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go index 3485194d..52b6c7eb 100644 --- a/pkg/storage/client/transactional/interfaces.go +++ b/pkg/storage/client/transactional/interfaces.go @@ -10,9 +10,12 @@ import ( type Client interface { client.Reader - BranchManager() BranchManager + TransactionManager() TransactionManager + // KeyedLock is used for locking operations targeting branches + //KeyedLock() syncutil.NamedLockMap + // BranchMerger is optional. - BranchMerger() BranchMerger + //BranchMerger() BranchMerger // CommitHookChain is a chain of hooks that are run before and after a commit is made. CommitHookChain() CommitHookChain @@ -25,24 +28,28 @@ type Client interface { // BranchTransaction creates a new "head" branch with the given {branchName} name, based // on the "base" branch in the context. The "base" branch is not locked for writing while // the transaction is running, but the head branch is. - BranchTransaction(ctx context.Context, branchName string, opts ...TxOption) BranchTx + BranchTransaction(ctx context.Context, branchName string, opts ...TxOption) Tx } -type BranchManager interface { +type TransactionManager interface { // CreateBranch creates a new branch with the given target branch name. It forks out // of the branch specified in the context. CreateBranch(ctx context.Context, branch string) error - // ResetToCleanBranch switches back to the given branch; but first discards all non-committed + // ResetToCleanVersion switches back to the given branch; but first discards all non-committed // changes. - ResetToCleanBranch(ctx context.Context, branch string) error + //ResetToCleanVersion(ctx context.Context, ref core.VersionRef) error // Commit creates a new commit for the branch stored in the context. 
Commit(ctx context.Context, commit Commit) error -} -type BranchMerger interface { - MergeBranches(ctx context.Context, base, head string, commit Commit) error + /*// LockVersionRef takes the VersionRef attached in the context, and makes sure that it is + // "locked" to the current commit for a given branch. + LockVersionRef(ctx context.Context) (context.Context, error)*/ } +/*type BranchMerger interface { + MergeBranches(ctx context.Context, base, head core.VersionRef, commit Commit) error +}*/ + type CustomTxFunc func(ctx context.Context) error type Tx interface { @@ -66,7 +73,7 @@ type Tx interface { PatchStatus(obj client.Object, patch client.Patch, opts ...client.PatchOption) Tx } -type BranchTx interface { +/*type BranchTx interface { CreateTx(Commit) BranchTxResult Abort(err error) error @@ -90,4 +97,4 @@ type BranchTx interface { type BranchTxResult interface { Error() error MergeWithBase(Commit) error -} +}*/ diff --git a/pkg/storage/client/transactional/options.go b/pkg/storage/client/transactional/options.go index 6b3679c2..5450d1e1 100644 --- a/pkg/storage/client/transactional/options.go +++ b/pkg/storage/client/transactional/options.go @@ -11,22 +11,22 @@ var _ TxOption = &TxOptions{} func defaultTxOptions() *TxOptions { return &TxOptions{ Timeout: 1 * time.Minute, - Mode: TxModeAtomic, + //Mode: TxModeAtomic, } } type TxOptions struct { Timeout time.Duration - Mode TxMode + //Mode TxMode } func (o *TxOptions) ApplyToTx(target *TxOptions) { if o.Timeout != 0 { target.Timeout = o.Timeout } - if len(o.Mode) != 0 { + /*if len(o.Mode) != 0 { target.Mode = o.Mode - } + }*/ } func (o *TxOptions) ApplyOptions(opts []TxOption) *TxOptions { @@ -36,7 +36,7 @@ func (o *TxOptions) ApplyOptions(opts []TxOption) *TxOptions { return o } -var _ TxOption = TxMode("") +/*var _ TxOption = TxMode("") type TxMode string @@ -55,7 +55,7 @@ const ( func (m TxMode) ApplyToTx(target *TxOptions) { target.Mode = m -} +}*/ var _ TxOption = TxTimeout(0) diff --git 
a/pkg/storage/client/transactional/tx_branch.go b/pkg/storage/client/transactional/tx_branch.go index c7011a36..851bfd0b 100644 --- a/pkg/storage/client/transactional/tx_branch.go +++ b/pkg/storage/client/transactional/tx_branch.go @@ -1,10 +1,6 @@ package transactional -import ( - "context" - "fmt" -) - +/* type txBranchImpl struct { *txCommon @@ -26,9 +22,9 @@ func (tx *txBranchImpl) CreateTx(c Commit) BranchTxResult { // Allow the merger to merge, if supported return &txResultImpl{ - err: cleanupErr, - ctx: tx.ctx, - merger: tx.merger, + err: cleanupErr, + ctx: tx.ctx, + //merger: tx.merger, baseBranch: tx.info.Base, headBranch: tx.info.Head, } @@ -46,11 +42,11 @@ func newErrTxResult(err error) *txResultImpl { } type txResultImpl struct { - err error - ctx context.Context - merger BranchMerger - baseBranch string - headBranch string + err error + ctx context.Context + //merger BranchMerger + baseBranch core.VersionRef + headBranch core.VersionRef } func (r *txResultImpl) Error() error { @@ -69,3 +65,4 @@ func (r *txResultImpl) MergeWithBase(c Commit) error { // Try to merge the branch return r.merger.MergeBranches(r.ctx, r.baseBranch, r.headBranch, c) } +*/ diff --git a/pkg/storage/client/transactional/tx_common.go b/pkg/storage/client/transactional/tx_common.go index 7c33849b..0229f24d 100644 --- a/pkg/storage/client/transactional/tx_common.go +++ b/pkg/storage/client/transactional/tx_common.go @@ -12,7 +12,7 @@ type txFunc func() error type txCommon struct { err error c client.Client - manager BranchManager + manager TransactionManager commitHook CommitHook ctx context.Context ops []txFunc diff --git a/pkg/storage/client/transactional/tx_ops.go b/pkg/storage/client/transactional/tx_ops.go index 766d0fd4..b87f3d33 100644 --- a/pkg/storage/client/transactional/tx_ops.go +++ b/pkg/storage/client/transactional/tx_ops.go @@ -58,6 +58,7 @@ func (tx *txImpl) PatchStatus(obj client.Object, patch client.Patch, opts ...cli }) } +/* // Implement the required 
"fluent/functional" methods on BranchTx. // Go doesn't have generics; hence we need to do this twice. @@ -108,3 +109,4 @@ func (tx *txBranchImpl) PatchStatus(obj client.Object, patch client.Patch, opts return nil // TODO tx.c.Status().Patch(ctx, obj, patch, opts...) }) } +*/ diff --git a/pkg/storage/client/transactional/utils.go b/pkg/storage/client/transactional/utils.go index 4c3f6cfa..76a9c41d 100644 --- a/pkg/storage/client/transactional/utils.go +++ b/pkg/storage/client/transactional/utils.go @@ -33,3 +33,9 @@ func randomSHA(byteLen int) (string, error) { } return hex.EncodeToString(b), nil } + +/*type BranchLocker struct { + KeyedLock sync.KeyedLock +} + +func (l *BranchLocker) */ diff --git a/pkg/storage/core/interfaces.go b/pkg/storage/core/interfaces.go index 75b5b6a5..108216b1 100644 --- a/pkg/storage/core/interfaces.go +++ b/pkg/storage/core/interfaces.go @@ -44,13 +44,30 @@ type ObjectID interface { GroupVersionKind() GroupVersionKind } -// VersionRef is an interface that describes a reference to a specific version (for now; branch) +/*// VersionRef is an interface that describes a reference to a specific version (for now; branch) // of Objects in a Storage or Client. type VersionRef interface { - // Branch returns the branch name. - Branch() string + // VersionRef returns the version reference, e.g. a branch name or a commit hash. + VersionRef() string // IsZeroValue determines if this VersionRef is the "zero value", which means // that the caller should figure out how to handle that the user did not // give specific opinions of what version of the Object to get. IsZeroValue() bool + // IsImmutable determines if the given version reference is immutable, i.e. cannot be modified. 
+ IsImmutable() bool } + +type LockedVersionRef interface { + VersionRef + + ImmutableRef() VersionRef +} + +type MutableVersionRef interface { + MutableRefName() string + IsDefault() bool +} + +type ImmutableVersionRef interface { + ImmutableHash() string +}*/ diff --git a/pkg/storage/core/versionref.go b/pkg/storage/core/versionref.go index 9598064e..fe8591c4 100644 --- a/pkg/storage/core/versionref.go +++ b/pkg/storage/core/versionref.go @@ -4,6 +4,18 @@ import ( "context" ) +type VersionRefResolver interface { + //IsImmutable(ref string) (bool, error) + // Turns a branch name into a commit hash. If ref already is an existing commit, this is a no-op. + ResolveVersionRef(ref string) (c Commit, immutableRef bool, err error) +} + +type Commit string + +/*type VersionRef2 string + + */ + var versionRefKey = versionRefKeyImpl{} type versionRefKeyImpl struct{} @@ -12,32 +24,78 @@ type versionRefKeyImpl struct{} // overwrites if one already exists in ctx). The key for the ref // is private in this package, so one must use this function to // register it. -func WithVersionRef(ctx context.Context, ref VersionRef) context.Context { +func WithVersionRef(ctx context.Context, ref string) context.Context { return context.WithValue(ctx, versionRefKey, ref) } // GetVersionRef returns the VersionRef attached to this context. // If there is no attached VersionRef, or it is nil, a BranchRef // with branch "" will be returned as the "zero value" of VersionRef. -func GetVersionRef(ctx context.Context) VersionRef { - r, ok := ctx.Value(versionRefKey).(VersionRef) +func GetVersionRef(ctx context.Context) string { + r, ok := ctx.Value(versionRefKey).(string) // Return default ref if none specified - if r == nil || !ok { - return NewBranchRef("") + if !ok { + return "" } return r } -// NewBranchRef creates a new VersionRef for a given branch. It is +/* +// NewMutableVersionRef creates a new VersionRef for a given branch. 
It is // valid for the branch to be ""; in this case it means the "zero // value", or unspecified branch to be more precise, where the caller // can choose how to handle. -func NewBranchRef(branch string) VersionRef { return branchRef{branch} } +func NewMutableVersionRef(ref string) VersionRef { + return versionRef{ + ref: ref, + immutable: false, + } +} + +func WithMutableVersionRef(ctx context.Context, ref string) context.Context { + return WithVersionRef(ctx, NewMutableVersionRef(ref)) +} -type branchRef struct{ branch string } +func NewImmutableVersionRef(ref string) VersionRef { + return versionRef{ + ref: ref, + immutable: false, + } +} -func (r branchRef) Branch() string { return r.branch } +func WithImmutableVersionRef(ctx context.Context, ref string) context.Context { + return WithVersionRef(ctx, NewImmutableVersionRef(ref)) +} + +type versionRef struct { + ref string + immutable bool +} + +func (r versionRef) VersionRef() string { return r.ref } // A branch is considered the zero value if the branch is an empty string, // which it is e.g. when there was no VersionRef associated with a Context. 
-func (r branchRef) IsZeroValue() bool { return r.branch == "" } +func (r versionRef) IsZeroValue() bool { return r.ref == "" } + +func (r versionRef) IsImmutable() bool { return r.immutable } + +func NewLockedVersionRef(mutable, immutable VersionRef) LockedVersionRef { + if !immutable.IsImmutable() { + panic("NewLockedVersionRef: immutable VersionRef must be immutable") + } + return lockedVersionRef{ + mutable: mutable, + immutable: immutable, + } +} + +type lockedVersionRef struct { + mutable, immutable VersionRef +} + +func (r lockedVersionRef) VersionRef() string { return r.mutable.VersionRef() } +func (r lockedVersionRef) IsZeroValue() bool { return r.mutable.IsZeroValue() } +func (r lockedVersionRef) IsImmutable() bool { return r.mutable.IsImmutable() } +func (r lockedVersionRef) ImmutableRef() VersionRef { return r.immutable } +*/ diff --git a/pkg/storage/filesystem/filesystem.go b/pkg/storage/filesystem/filesystem.go index f523e7b4..2d21f9c6 100644 --- a/pkg/storage/filesystem/filesystem.go +++ b/pkg/storage/filesystem/filesystem.go @@ -7,6 +7,7 @@ import ( "strconv" "github.com/spf13/afero" + "github.com/weaveworks/libgitops/pkg/storage/core" ) // Filesystem extends afero.Fs and afero.Afero with contexts added to every method. @@ -56,6 +57,8 @@ type Filesystem interface { // This path MUST be absolute. All other paths for the other methods // MUST be relative to this directory. 
RootDirectory() string + + VersionRefResolver() core.VersionRefResolver } // NewOSFilesystem creates a new afero.OsFs for the local directory, using @@ -123,6 +126,10 @@ func (f *filesystem) Walk(_ context.Context, root string, walkFn filepath.WalkFu return afero.Walk(f.fs, root, walkFn) } +func (f *filesystem) VersionRefResolver() core.VersionRefResolver { + return nil +} + func checksumFromFileInfo(fi os.FileInfo) string { return strconv.FormatInt(fi.ModTime().UnixNano(), 10) } diff --git a/pkg/storage/filesystem/storage.go b/pkg/storage/filesystem/storage.go index 57945f9c..d3a6b4ac 100644 --- a/pkg/storage/filesystem/storage.go +++ b/pkg/storage/filesystem/storage.go @@ -44,6 +44,10 @@ func (r *Generic) FileFinder() FileFinder { return r.fileFinder } +func (r *Generic) VersionRefResolver() core.VersionRefResolver { + return r.fileFinder.Filesystem().VersionRefResolver() +} + func (r *Generic) Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) { // Get the path and verify namespacing info p, err := r.getPath(ctx, id) diff --git a/pkg/storage/filesystem/unstructured/btree/btree_cache_test.go b/pkg/storage/filesystem/unstructured/btree/btree_cache_test.go new file mode 100644 index 00000000..dc9dc841 --- /dev/null +++ b/pkg/storage/filesystem/unstructured/btree/btree_cache_test.go @@ -0,0 +1,84 @@ +package btree + +/* + +func Test_strItem_Less_key(t *testing.T) { + tests := []struct { + str string + cmp btree.Item + want bool + }{ + {"", &key{objectID: objectID{core.GroupKind{Group: "foo", Kind: "bar"}, core.ObjectKey{Name: "bar"}}}, true}, + } + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + if got := strItem(tt.str).Less(tt.cmp); got != tt.want { + t.Errorf("strItem.Less() = %v, want %v", got, tt.want) + } + }) + } +} + +func Test_key_String(t *testing.T) { + tests := []struct { + objectID objectID + want string + }{ + {objID("foo.com", "Bar", "baz", ""), "key:f6377908"}, + } + for i, tt := range tests { + 
t.Run(strconv.Itoa(i), func(t *testing.T) { + k := &key{objectID: tt.objectID} + if got := k.String(); got != tt.want { + t.Errorf("key.String() = %v, want %v", got, tt.want) + } + }) + } +} + +func objID(group, kind, ns, name string) objectID { + return objectID{Kind: core.GroupKind{Group: group, Kind: kind}, Key: core.ObjectKey{Name: name, Namespace: ns}} +} + +type items []ItemQuery + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item ItemQuery) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +func Test_items_find(t *testing.T) { + tests := []struct { + list []ItemQuery + item ItemQuery + wantIndex int + wantFound bool + }{ + { + list: []ItemQuery{strItem("cc:bb"), strItem("foo:aa:kk"), strItem("foo:bb:kk"), strItem("foo:cc:kk"), strItem("foo:cc")}, + item: strItem("foo:"), + wantIndex: 1, + wantFound: false, + }, + } + for i, tt := range tests { + t.Run(strconv.Itoa(i), func(t *testing.T) { + gotIndex, gotFound := items(tt.list).find(tt.item) + if gotIndex != tt.wantIndex { + t.Errorf("items.find() gotIndex = %v, want %v", gotIndex, tt.wantIndex) + } + if gotFound != tt.wantFound { + t.Errorf("items.find() gotFound = %v, want %v", gotFound, tt.wantFound) + } + }) + } +} +*/ diff --git a/pkg/storage/filesystem/unstructured/tx/tx.go b/pkg/storage/filesystem/unstructured/tx/tx.go index 567d1f14..5593ebdf 100644 --- a/pkg/storage/filesystem/unstructured/tx/tx.go +++ b/pkg/storage/filesystem/unstructured/tx/tx.go @@ -30,11 +30,11 @@ type unstructuredStorageTxHandler struct { } func (h *unstructuredStorageTxHandler) PreTransactionHook(ctx context.Context, info transactional.TxInfo) error { - head := core.NewBranchRef(info.Head) + head := core.NewMutableVersionRef(info.Head) if 
h.fileFinder.HasVersionRef(head) { return nil // head exists, no-op } - base := core.NewBranchRef(info.Base) + base := core.NewMutableVersionRef(info.Base) // If both head and base are the same, and we know that head does not exist in the system, we need to create // head "from scratch" as a "root version" if info.Head == info.Base { diff --git a/pkg/storage/interfaces.go b/pkg/storage/interfaces.go index 3d260603..60e2bcb5 100644 --- a/pkg/storage/interfaces.go +++ b/pkg/storage/interfaces.go @@ -34,6 +34,7 @@ type Storage interface { // StorageCommon is an interface that contains the resources both needed // by Reader and Writer. type StorageCommon interface { + VersionRefResolver() core.VersionRefResolver // Namespacer gives access to the namespacer that is used Namespacer() Namespacer // Exists checks if the resource indicated by the ID exists. diff --git a/pkg/util/sync/lock.go b/pkg/util/sync/lock.go new file mode 100644 index 00000000..488046bb --- /dev/null +++ b/pkg/util/sync/lock.go @@ -0,0 +1,107 @@ +package sync + +import ( + "sync" +) + +type NamedLockMap interface { + LockByName(name string) LockWithData +} + +type LockWithData interface { + Load(key interface{}) (value interface{}, ok bool) + QLoad(key interface{}) interface{} + + // These automatically do a Lock()/Unlock() when executing + LoadOrStore(key, value interface{}) (actual interface{}, loaded bool) + QLoadOrStore(key, value interface{}) interface{} + Store(key, value interface{}) + + sync.Locker + + RLocker() sync.Locker + RLock() + RUnlock() + + /*RLock(key string) KeyedLockGetter + RUnlock(key string) + + Lock(key string) KeyedLockSetter + Unlock(key string)*/ +} + +/*type KeyedLockGetter interface { + Get(key interface{}) interface{} +} + +type KeyedLockSetter interface { + KeyedLockGetter + Set(key, value interface{}) +}*/ + +func NewNamedLockMap() NamedLockMap { + return &namedLockMap{ + locks: make(map[string]*lockWithData), + locksMu: &sync.Mutex{}, + } +} + +type namedLockMap 
struct { + // locks maps keys to their individual locks and associated data + locks map[string]*lockWithData + // locksMu guards reads and writes of the locks map + locksMu *sync.Mutex +} + +func (l *namedLockMap) LockByName(name string) LockWithData { + // l.locksMu guards reads and writes of the c.locks map + l.locksMu.Lock() + defer l.locksMu.Unlock() + + // Check if information about a transaction on this branch exists. + txState, ok := l.locks[name] + if ok { + return txState + } + // if not, grow the txs map by one and return it + l.locks[name] = &lockWithData{ + RWMutex: &sync.RWMutex{}, + Map: &sync.Map{}, + } + return l.locks[name] +} + +type lockWithData struct { + *sync.RWMutex + *sync.Map + //data map[interface{}]interface{} +} + +func (l *lockWithData) QLoad(key interface{}) interface{} { + value, _ := l.Map.Load(key) + return value +} + +func (l *lockWithData) QLoadOrStore(key, value interface{}) interface{} { + actual, _ := l.Map.LoadOrStore(key, value) + return actual +} + +/* +func (l *lockWithData) RLock() { l.mu.RLock() } +func (l *lockWithData) RUnlock() { l.mu.RUnlock() } +func (l *lockWithData) Lock() { l.mu.Lock() } +func (l *lockWithData) Unlock() { l.mu.Unlock() } +*/ + +/*func (l *lockWithData) Get(key interface{}) interface{} { + return l.data[key] +} + +type writableLockWithData struct { + *lockWithData +} + +func (l *writableLockWithData) Set(key, value interface{}) { + l.data[key] = value +}*/ From 0bd8db60e558615981f99e951b269c8a9c376eb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 27 Jul 2021 14:47:00 +0300 Subject: [PATCH 141/149] Merge v2_framing code --- go.mod | 22 +- go.sum | 293 +++-------- pkg/content/constructors.go | 142 +++++ pkg/content/errors.go | 43 ++ pkg/content/interfaces.go | 139 +++++ pkg/content/metadata.go | 101 ++++ pkg/content/metadata/metadata.go | 157 ++++++ pkg/content/metadata/metadata_test.go | 125 +++++ pkg/content/reader.go | 244 +++++++++ pkg/content/reader_test.go | 
62 +++ pkg/content/recognizing.go | 203 +++++++ pkg/content/recognizing_reader_test.go | 96 ++++ pkg/content/recognizing_test.go | 69 +++ pkg/content/segment_reader.go | 63 +++ pkg/content/tracing.go | 33 ++ pkg/content/writer.go | 121 +++++ pkg/frame/constructors.go | 104 ++++ pkg/frame/errors.go | 38 ++ pkg/frame/interfaces.go | 160 ++++++ pkg/frame/k8s_reader_streaming.go | 110 ++++ pkg/frame/k8s_reader_yaml.go | 130 +++++ pkg/frame/options.go | 153 ++++++ pkg/frame/options_boilerplate.go | 114 ++++ pkg/frame/options_test.go | 153 ++++++ pkg/frame/reader.go | 113 ++++ pkg/frame/reader_factory.go | 74 +++ pkg/frame/reader_factory_test.go | 60 +++ pkg/frame/reader_streaming.go | 115 ++++ pkg/frame/reader_test.go | 526 +++++++++++++++++++ pkg/frame/sanitize/comments/LICENSE | 201 +++++++ pkg/frame/sanitize/comments/comments.go | 117 +++++ pkg/frame/sanitize/comments/comments_test.go | 370 +++++++++++++ pkg/frame/sanitize/comments/lost.go | 118 +++++ pkg/frame/sanitize/sanitize.go | 220 ++++++++ pkg/frame/sanitize/sanitize_test.go | 460 ++++++++++++++++ pkg/frame/single.go | 48 ++ pkg/frame/utils.go | 78 +++ pkg/frame/utils_test.go | 119 +++++ pkg/frame/writer.go | 76 +++ pkg/frame/writer_delegate.go | 58 ++ pkg/frame/writer_factory.go | 50 ++ pkg/frame/writer_test.go | 34 ++ pkg/tracing/logging.go | 133 +++++ pkg/tracing/tracer_provider.go | 248 +++++++++ pkg/tracing/tracing.go | 244 +++++++++ pkg/tracing/tracing_test.go | 65 +++ pkg/util/compositeio/compositeio.go | 38 ++ pkg/util/limitedio/limitedio.go | 178 +++++++ pkg/util/structerr/structerr.go | 13 + 49 files changed, 6395 insertions(+), 236 deletions(-) create mode 100644 pkg/content/constructors.go create mode 100644 pkg/content/errors.go create mode 100644 pkg/content/interfaces.go create mode 100644 pkg/content/metadata.go create mode 100644 pkg/content/metadata/metadata.go create mode 100644 pkg/content/metadata/metadata_test.go create mode 100644 pkg/content/reader.go create mode 100644 
pkg/content/reader_test.go create mode 100644 pkg/content/recognizing.go create mode 100644 pkg/content/recognizing_reader_test.go create mode 100644 pkg/content/recognizing_test.go create mode 100644 pkg/content/segment_reader.go create mode 100644 pkg/content/tracing.go create mode 100644 pkg/content/writer.go create mode 100644 pkg/frame/constructors.go create mode 100644 pkg/frame/errors.go create mode 100644 pkg/frame/interfaces.go create mode 100644 pkg/frame/k8s_reader_streaming.go create mode 100644 pkg/frame/k8s_reader_yaml.go create mode 100644 pkg/frame/options.go create mode 100644 pkg/frame/options_boilerplate.go create mode 100644 pkg/frame/options_test.go create mode 100644 pkg/frame/reader.go create mode 100644 pkg/frame/reader_factory.go create mode 100644 pkg/frame/reader_factory_test.go create mode 100644 pkg/frame/reader_streaming.go create mode 100644 pkg/frame/reader_test.go create mode 100644 pkg/frame/sanitize/comments/LICENSE create mode 100644 pkg/frame/sanitize/comments/comments.go create mode 100644 pkg/frame/sanitize/comments/comments_test.go create mode 100644 pkg/frame/sanitize/comments/lost.go create mode 100644 pkg/frame/sanitize/sanitize.go create mode 100644 pkg/frame/sanitize/sanitize_test.go create mode 100644 pkg/frame/single.go create mode 100644 pkg/frame/utils.go create mode 100644 pkg/frame/utils_test.go create mode 100644 pkg/frame/writer.go create mode 100644 pkg/frame/writer_delegate.go create mode 100644 pkg/frame/writer_factory.go create mode 100644 pkg/frame/writer_test.go create mode 100644 pkg/tracing/logging.go create mode 100644 pkg/tracing/tracer_provider.go create mode 100644 pkg/tracing/tracing.go create mode 100644 pkg/tracing/tracing_test.go create mode 100644 pkg/util/compositeio/compositeio.go create mode 100644 pkg/util/limitedio/limitedio.go create mode 100644 pkg/util/structerr/structerr.go diff --git a/go.mod b/go.mod index c1a3f249..6d518c20 100644 --- a/go.mod +++ b/go.mod @@ -5,34 +5,36 @@ go 1.15 
replace github.com/docker/distribution => github.com/docker/distribution v2.7.1+incompatible require ( - github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 // indirect - github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 // indirect github.com/evanphx/json-patch v4.11.0+incompatible github.com/fluxcd/go-git-providers v0.2.0 github.com/fluxcd/pkg/ssh v0.2.0 github.com/go-git/go-git/v5 v5.4.2 + github.com/go-logr/logr v0.4.0 github.com/go-openapi/spec v0.20.3 - github.com/go-openapi/strfmt v0.19.5 // indirect - github.com/go-openapi/validate v0.19.8 // indirect github.com/google/btree v1.0.1 github.com/google/go-github/v32 v32.1.0 github.com/labstack/echo v3.3.10+incompatible github.com/labstack/gommon v0.3.0 // indirect - github.com/markbates/pkger v0.17.1 // indirect github.com/mattn/go-isatty v0.0.13 // indirect github.com/mitchellh/go-homedir v1.1.0 - github.com/qri-io/starlib v0.4.2-0.20200213133954-ff2e8cd5ef8d // indirect github.com/rjeczalik/notify v0.9.2 github.com/sirupsen/logrus v1.8.1 github.com/spf13/afero v1.6.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.7.0 - go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee // indirect + go.opentelemetry.io/otel v1.0.0-RC2 + go.opentelemetry.io/otel/exporters/jaeger v1.0.0-RC2 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC2 + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.0.0-RC2 + go.opentelemetry.io/otel/sdk v1.0.0-RC2 + go.opentelemetry.io/otel/trace v1.0.0-RC2 + go.uber.org/multierr v1.6.0 + go.uber.org/zap v1.17.0 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 - gotest.tools v2.2.0+incompatible // indirect k8s.io/apimachinery v0.21.2 k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d k8s.io/utils v0.0.0-20210527160623-6fdb442a123b - sigs.k8s.io/controller-runtime v0.9.1 - sigs.k8s.io/kustomize/kyaml v0.10.21 + sigs.k8s.io/controller-runtime v0.9.3 + sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738 + 
sigs.k8s.io/yaml v1.2.0 ) diff --git a/go.sum b/go.sum index d60d6a0e..77e8a284 100644 --- a/go.sum +++ b/go.sum @@ -6,7 +6,6 @@ cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxK cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -23,27 +22,14 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/360EntSecGroup-Skylar/excelize v1.4.1/go.mod h1:vnax29X2usfl7HHkBrX5EvSCJcmH3dT9luvxzu8iGAE= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod 
h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= @@ -54,35 +40,26 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= 
github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/PuerkitoBio/goquery v1.5.0/go.mod h1:qD2PgZ9lccMbQlc7eEOjaeRlFQON7xY8kdmcsrnKqMg= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod 
h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= @@ -90,8 +67,9 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= 
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -101,6 +79,9 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -121,34 +102,29 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustmop/soup v1.1.2-0.20190516214245-38228baa104e/go.mod h1:CgNC6SGbT+Xb8wGGvzilttZL1mc5sQ/5KkcxsZttMIk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= 
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fluxcd/go-git-providers v0.0.3 h1:pquQvTpd1a4V1efPyZWuVPeIKrTgV8QRoDY0VGH+qiw= -github.com/fluxcd/go-git-providers v0.0.3/go.mod h1:iaXf3nEq8MB/LzxfbNcCl48sAtIReUU7jqjJ7CEnfFQ= github.com/fluxcd/go-git-providers v0.2.0 h1:2dxT4r9UDjKwsNFmO9wcSR2FUqKyvsDwha5b/zvK1Ko= github.com/fluxcd/go-git-providers v0.2.0/go.mod h1:nRgNpHZmZhrsyNSma1JcAhjUG9xrqMGJcIUr9K7M7vk= -github.com/fluxcd/pkg/ssh v0.0.5 h1:rnbFZ7voy2JBlUfMbfyqArX2FYaLNpDhccGFC3qW83A= -github.com/fluxcd/pkg/ssh v0.0.5/go.mod h1:7jXPdXZpc0ttMNz2kD9QuMi3RNn/e0DOFbj0Tij/+Hs= github.com/fluxcd/pkg/ssh v0.2.0 h1:e9V+HReOL7czm7edVzYS1e+CnFKz1/kHiUNfLRpBdH8= github.com/fluxcd/pkg/ssh v0.2.0/go.mod h1:EpQC7Ztdlbi8S/dlYXqVDZtHtLpN3FNl3N6zWujVzbA= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 
h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= @@ -157,26 +133,18 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM= -github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M= -github.com/go-git/go-git-fixtures/v4 
v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw= +github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8= github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI= -github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs= github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -190,85 +158,36 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs= -github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= -github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= +github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= -github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= 
-github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= -github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= -github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= 
-github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= -github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= -github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= -github.com/go-openapi/spec v0.20.0 h1:HGLc8AJ7ynOxwv0Lq4TsnwLsWMawHAYiJIFzbcML86I= -github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= -github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= -github.com/go-openapi/strfmt 
v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= -github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= -github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.12 h1:Bc0bnY2c3AoF7Gc+IMIAQQsD8fLHjHpc19wXvYuayQI= -github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= -github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -289,14 +208,12 @@ github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrU github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= 
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= @@ -305,11 +222,10 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= -github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= @@ -325,14 +241,12 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= -github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= @@ -348,6 +262,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -375,9 +291,6 @@ github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/J github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc= -github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -391,7 +304,6 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -402,12 +314,9 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod 
h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/k0kubun/pp v2.3.0+incompatible/go.mod h1:GWse8YhT0p8pT4ir3ZgBbfZild3tgzSScAn6HmfYukg= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -416,6 +325,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= @@ -429,15 +339,12 @@ github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0 github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= 
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= @@ -446,8 +353,6 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= -github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= @@ -466,7 +371,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= -github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -474,17 +378,16 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg 
v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -493,20 +396,16 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 
h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs= -github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/paulmach/orb v0.1.3/go.mod h1:VFlX/8C+IQ1p6FTRRKzKoOPJnvEtA5G0Veuqwbu//Vk= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -521,7 +420,6 @@ github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prY github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= 
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= @@ -533,28 +431,25 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/qri-io/starlib v0.4.2-0.20200213133954-ff2e8cd5ef8d/go.mod h1:7DPO4domFU579Ga6E61sB9VFNaniPVwJP5C4bBCu3wA= github.com/rjeczalik/notify v0.9.2 
h1:MiTWrPj55mNDHEiIX5YUSKefw/+lCQVoAFmD6oQm5w8= github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa4QEjJeqM= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -562,7 +457,6 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= @@ -571,7 +465,6 @@ github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9 github.com/soheilhy/cmux v0.1.4/go.mod 
h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= @@ -593,16 +486,13 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.2.3-0.20181224173747-660f15d67dbb/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= 
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -611,14 +501,11 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/xanzy/go-gitlab v0.33.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= github.com/xanzy/go-gitlab v0.43.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= -github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -626,49 +513,50 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod 
h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.starlark.net v0.0.0-20190528202925-30ae18b8564f/go.mod h1:c1/X6cHgvdXj6pUlmWKMkuqRnW4K8x2vwt6JAaaircg= +go.opentelemetry.io/otel v1.0.0-RC2 h1:SHhxSjB+omnGZPgGlKe+QMp3MyazcOHdQ8qwo89oKbg= +go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM= +go.opentelemetry.io/otel/exporters/jaeger v1.0.0-RC2 h1:RF0nWsIDpDBe+s06lkLxUw9CWQUAhO6hBSxxB7dz45s= +go.opentelemetry.io/otel/exporters/jaeger v1.0.0-RC2/go.mod h1:sZZqN3Vb0iT+NE6mZ1S7sNyH3t4PFk6ElK5TLGFBZ7E= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.0-RC2 h1:Z/91DSYkOqnVuECrd+hxCU9lzeo5Fihjp28uq0Izfpw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.0-RC2/go.mod h1:T+s8GKi1OqMwPuZ+ouDtZW4vWYpJuzIzh2Matq4Jo9k= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC2 h1:PaSlrCE+hRbamroLGGgFDmzDamCxp7ID+hBvPmOhcSc= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.0-RC2/go.mod h1:3shayJIFcDqHi9/GT2fAHyMI/bRgc6FO0CAkhaDkhi0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.0.0-RC2 
h1:crksoFyTPDDywRJDUW36OZma+C3HhcYwQLPUZZMXFO0= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.0.0-RC2/go.mod h1:6kVxj1C/f3irP/IeeZNbcEwbg3rwnM6a7bCrcGbIJeI= +go.opentelemetry.io/otel/sdk v1.0.0-RC2 h1:ROuteeSCBaZNjiT9JcFzZepmInDvLktR28Y6qKo8bCs= +go.opentelemetry.io/otel/sdk v1.0.0-RC2/go.mod h1:fgwHyiDn4e5k40TD9VX243rOxXR+jzsWBZYA2P5jpEw= +go.opentelemetry.io/otel/trace v1.0.0-RC2 h1:dunAP0qDULMIT82atj34m5RgvsIK6LcsXf1c/MsYg1w= +go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/tools 
v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= +go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto 
v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -694,9 +582,9 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= @@ -705,14 +593,13 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod 
h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -721,7 +608,6 @@ golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -732,7 +618,6 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -742,10 +627,8 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod 
h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -757,7 +640,6 @@ golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAG golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -779,10 +661,8 @@ golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -813,20 +693,17 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210108172913-0df2131ae363 h1:wHn06sgWHMO1VsQ8F+KzDJx/JzqfsNLnc+oEi07qD7s= -golang.org/x/sys v0.0.0-20210108172913-0df2131ae363/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -841,7 +718,6 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= @@ -850,17 +726,12 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e 
h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 h1:Vv0JUPWTyeqUq42B2WJ1FeIDjjvGKoA2Ss+Ts0lAVbs= golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -871,15 +742,12 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= 
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -898,19 +766,16 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools 
v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.1.0 h1:Phva6wqu+xR//Njw6iorylFFgn/z547tw5Ne3HZPQ+k= -gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -930,8 +795,6 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -952,17 +815,25 @@ google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4 google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -971,18 +842,18 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= @@ -1000,88 +871,58 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= -k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y= k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= -k8s.io/apiextensions-apiserver v0.19.2 h1:oG84UwiDsVDu7dlsGQs5GySmQHCzMhknfhFExJMz9tA= -k8s.io/apiextensions-apiserver v0.19.2/go.mod h1:EYNjpqIAvNZe+svXVx9j4uBaVhTB4C94HkY3w058qcg= 
k8s.io/apiextensions-apiserver v0.21.2 h1:+exKMRep4pDrphEafRvpEi79wTnCFMqKf8LBtlA3yrE= k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= -k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= -k8s.io/apimachinery v0.19.6 h1:kBLzSGuDdY1NdSV2uFzI+FwZ9wtkmG+X3ZVcWXSqNgA= -k8s.io/apimachinery v0.19.6/go.mod h1:6sRbGRAVY5DOCuZwB5XkqguBqpqLU6q/kOaOdk29z6Q= k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc= k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= -k8s.io/apiserver v0.19.2/go.mod h1:FreAq0bJ2vtZFj9Ago/X0oNGC51GfubKK/ViOKfVAOA= k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= -k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc= -k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= k8s.io/client-go v0.21.2 h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0= k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= -k8s.io/code-generator v0.19.2/go.mod h1:moqLn7w0t9cMs4+5CQyxnfA/HV8MF6aAVENF+WZZhgk= k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= -k8s.io/component-base v0.19.2 h1:jW5Y9RcZTb79liEhW3XDVTW7MuvEGP0tQZnfSX6/+gs= -k8s.io/component-base v0.19.2/go.mod h1:g5LrsiTiabMLZ40AR6Hl45f088DevyGY+cCE2agEIVo= k8s.io/component-base v0.21.2 h1:EsnmFFoJ86cEywC0DoIkAUiEV6fjgauNugiw1lmIjs4= k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= -k8s.io/klog/v2 v2.2.0 
h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= -k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d h1:lUK8GPtuJy8ClWZhuvKoaLdKGPLq9H1PxWp7VPBZBkU= k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= -k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200912215256-4140de9c8800 h1:9ZNvfPvVIEsp/T1ez4GQuzCcCTEQWhovSofhqR73A6g= -k8s.io/utils v0.0.0-20200912215256-4140de9c8800/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.9/go.mod h1:dzAXnQbTRyDlZPJX2SUPEqvnB+j7AJjtlox7PEwigU0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod 
h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.7.0 h1:bU20IBBEPccWz5+zXpLnpVsgBYxqclaHu1pVDl/gEt8= -sigs.k8s.io/controller-runtime v0.7.0/go.mod h1:pJ3YBrJiAqMAZKi6UVGuE98ZrroV1p+pIhoHsMm9wdU= -sigs.k8s.io/controller-runtime v0.9.1 h1:+LAqHAhkVW4lt/jLlrKmnGPA7OORMw/xEUH3Ey1h1Bs= -sigs.k8s.io/controller-runtime v0.9.1/go.mod h1:cTqsgnwSOsYS03XwySYZj8k6vf0+eC4FJRcCgQ9elb4= -sigs.k8s.io/kustomize/kyaml v0.10.5 h1:PbJcsZsEM7O3hHtUWTR+4WkHVbQRW9crSy75or1gRbI= -sigs.k8s.io/kustomize/kyaml v0.10.5/go.mod h1:P6Oy/ah/GZMKzJMIJA2a3/bc8YrBkuL5kJji13PSIzY= -sigs.k8s.io/kustomize/kyaml v0.10.21 h1:KdoEgz3HzmcaLUTFqs6aaqFpsaA9MVRIwOZbi8vMaD0= -sigs.k8s.io/kustomize/kyaml v0.10.21/go.mod h1:TYWhGwW9vjoRh3rWqBwB/ZOXyEGRVWe7Ggc3+KZIO+c= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= -sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/controller-runtime v0.9.3 h1:n075bHQ1wb8hpX7C27pNrqsb0fj8mcfCQfNX+oKTbYE= +sigs.k8s.io/controller-runtime v0.9.3/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= +sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738 h1:Nkg3viu9IE/TSzvYt4GGy5FkhdPk3bptXuxW5TnU9uo= +sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= diff --git a/pkg/content/constructors.go b/pkg/content/constructors.go new file mode 100644 index 00000000..4b8d032c --- /dev/null +++ b/pkg/content/constructors.go @@ -0,0 +1,142 @@ +package content + +import ( + "bytes" + "os" + "path/filepath" + "strings" + "testing/iotest" + + 
"github.com/weaveworks/libgitops/pkg/content/metadata" +) + +// newErrReader makes a Reader implementation that only returns the given error on Read() +func newErrReader(err error, opts ...metadata.HeaderOption) Reader { + return NewReader(iotest.ErrReader(err), opts...) +} + +const ( + stdinPath = "/dev/stdin" + stdoutPath = "/dev/stdout" + stderrPath = "/dev/stderr" +) + +func FromStdin(opts ...metadata.HeaderOption) Reader { + return FromFile(stdinPath, opts...) +} + +// FromFile returns an io.ReadCloser from the given file, or an io.ReadCloser which returns +// the given file open error when read. +func FromFile(filePath string, opts ...metadata.HeaderOption) Reader { + // Support stdin + if filePath == "-" || filePath == stdinPath { + // Mark the source as /dev/stdin + opts = append(opts, metadata.WithContentLocation(stdinPath)) + // TODO: Maybe have a way to override the TracerName through Metadata? + return NewReader(os.Stdin, opts...) + } + + // Make sure the path is absolute + filePath, err := filepath.Abs(filePath) + if err != nil { + return newErrReader(err, opts...) + } + // Report the file path in the X-Content-Location header + opts = append(opts, metadata.WithContentLocation(filePath)) + + // Open the file + f, err := os.Open(filePath) + if err != nil { + return newErrReader(err, opts...) + } + fi, err := f.Stat() + if err != nil { + return newErrReader(err, opts...) + } + + // Register the Content-Length header + opts = append(opts, metadata.WithContentLength(fi.Size())) + + return NewReader(f, opts...) +} + +// FromBytes returns an io.Reader from the given byte content. +func FromBytes(content []byte, opts ...metadata.HeaderOption) Reader { + // Register the Content-Length + opts = append(opts, metadata.WithContentLength(int64(len(content)))) + // Read from a *bytes.Reader + return NewReader(bytes.NewReader(content), opts...) +} + +// FromString returns an io.Reader from the given string content. 
+func FromString(content string, opts ...metadata.HeaderOption) Reader { + // Register the Content-Length + opts = append(opts, metadata.WithContentLength(int64(len(content)))) + // Read from a *strings.Reader + return NewReader(strings.NewReader(content), opts...) +} + +/*func From/ToHTTPResponse(resp *http.Response, opts ...metadata.HeaderOption) Reader { + TODO +}*/ + +func ToStdout(opts ...metadata.HeaderOption) Writer { + return ToFile(stdoutPath, opts...) +} +func ToStderr(opts ...metadata.HeaderOption) Writer { + return ToFile(stderrPath, opts...) +} +func ToBuffer(buf *bytes.Buffer, opts ...metadata.HeaderOption) Writer { + return NewWriter(buf, opts...) +} + +func ToFile(filePath string, opts ...metadata.HeaderOption) Writer { + // Shorthands for pipe IO + if filePath == "-" || filePath == stdoutPath { + // Mark the target as /dev/stdout + opts = append(opts, metadata.WithContentLocation(stdoutPath)) + return NewWriter(os.Stdout, opts...) + } + if filePath == stderrPath { + // Mark the target as /dev/stderr + opts = append(opts, metadata.WithContentLocation(stderrPath)) + return NewWriter(os.Stderr, opts...) + } + + // Make sure the path is absolute + // TODO: Maybe we actually support "relative" paths as a separate type; and not modify + filePath, err := filepath.Abs(filePath) + if err != nil { + return newErrWriter(err, opts...) + } + // Report the file path in the X-Content-Location header + opts = append(opts, metadata.WithContentLocation(filePath)) + + // Make sure all directories are created + if err := os.MkdirAll(filepath.Dir(filePath), 0755); err != nil { + return newErrWriter(err, opts...) + } + + // Create or truncate the file + f, err := os.Create(filePath) + if err != nil { + return newErrWriter(err, opts...) + } + + // Register the Content-Length header + fi, err := f.Stat() + if err != nil { + return newErrWriter(err, opts...) + } + opts = append(opts, metadata.WithContentLength(fi.Size())) + + return NewWriter(f, opts...) 
+} + +func newErrWriter(err error, opts ...metadata.HeaderOption) Writer { + return NewWriter(&errWriter{err}, opts...) +} + +type errWriter struct{ err error } + +func (w *errWriter) Write([]byte) (int, error) { return 0, w.err } diff --git a/pkg/content/errors.go b/pkg/content/errors.go new file mode 100644 index 00000000..164e3c89 --- /dev/null +++ b/pkg/content/errors.go @@ -0,0 +1,43 @@ +package content + +import ( + "fmt" + + "github.com/weaveworks/libgitops/pkg/util/structerr" +) + +// Enforce all struct errors implementing structerr.StructError +var _ structerr.StructError = &UnsupportedContentTypeError{} + +// ErrUnsupportedContentType creates a new *UnsupportedContentTypeError +func ErrUnsupportedContentType(unsupported ContentType, supported ...ContentType) *UnsupportedContentTypeError { + return &UnsupportedContentTypeError{Unsupported: unsupported, Supported: supported} +} + +// UnsupportedContentTypeError describes that the supplied content type is not supported by an +// implementation handling different content types. +// +// This error can be checked for equality using errors.Is(err, &UnsupportedContentTypeError{}) +type UnsupportedContentTypeError struct { + // Unsupported is the content type that was given but not supported + // +required + Unsupported ContentType + // Supported is optional; if len(Supported) != 0, it lists the content types that indeed + // are supported by the implementation. If len(Supported) == 0, it should not be used + // as an indicator. + // +optional + Supported []ContentType +} + +func (e *UnsupportedContentTypeError) Error() string { + msg := fmt.Sprintf("unsupported content type: %q", e.Unsupported) + if len(e.Supported) != 0 { + msg = fmt.Sprintf("%s. 
supported content types: %v", msg, e.Supported) + } + return msg +} + +func (e *UnsupportedContentTypeError) Is(target error) bool { + _, ok := target.(*UnsupportedContentTypeError) + return ok +} diff --git a/pkg/content/interfaces.go b/pkg/content/interfaces.go new file mode 100644 index 00000000..3c85964f --- /dev/null +++ b/pkg/content/interfaces.go @@ -0,0 +1,139 @@ +package content + +import ( + "context" + "fmt" + "io" + + "github.com/weaveworks/libgitops/pkg/content/metadata" +) + +var _ fmt.Stringer = ContentType("") + +type ContentType string + +const ( + ContentTypeYAML ContentType = "application/yaml" + ContentTypeJSON ContentType = "application/json" +) + +func (ct ContentType) ContentType() ContentType { return ct } +func (ct ContentType) String() string { return string(ct) } + +type ContentTypes []ContentType + +func (cts ContentTypes) Has(want ContentType) bool { + for _, ct := range cts { + if ct == want { + return true + } + } + return false +} + +func WithContentType(ct ContentType) metadata.HeaderOption { + return metadata.SetOption(metadata.ContentTypeKey, ct.String()) +} + +type ContentTyped interface { + ContentType() ContentType +} + +type ContentTypeSupporter interface { + // Order _might_ carry a meaning + SupportedContentTypes() ContentTypes +} + +// underlying is the underlying stream of the Reader. +// If the returned io.Reader does not implement io.Closer, +// the underlying.Close() method will be re-used. +type WrapReaderFunc func(underlying io.ReadCloser) io.Reader + +type WrapWriterFunc func(underlying io.WriteCloser) io.Writer + +type WrapReaderToSegmentFunc func(underlying io.ReadCloser) RawSegmentReader + +// Reader is a tracing-capable and metadata-bound io.Reader and io.Closer +// wrapper. It is NOT thread-safe by default. It supports introspection +// of composite ReadClosers. The TracerProvider from the given context +// is used. 
+// +// The Reader reads the current span from the given context, and uses that +// span's TracerProvider to create a Tracer and then also a new Span for +// the current operation. +type Reader interface { + // These call the underlying Set/ClearContext functions before/after + // reads and closes, and then uses the underlying io.ReadCloser. + // If the underlying Reader doesn't support closing, the returned + // Close method will only log a "CloseNoop" trace and exit with err == nil. + WithContext(ctx context.Context) io.ReadCloser + + // This reader supports registering metadata about the content it + // is reading. + MetadataContainer + + // Wrap returns a new Reader with io.ReadCloser B that reads from + // the current Reader's underlying io.ReadCloser A. If the returned + // B is an io.ReadCloser or this Reader's HasCloser() is true, + // HasCloser() of the returned Reader will be true, otherwise false. + Wrap(fn WrapReaderFunc) Reader + WrapSegment(fn WrapReaderToSegmentFunc) SegmentReader +} + +type RawSegmentReader interface { + Read() ([]byte, error) +} + +type ClosableRawSegmentReader interface { + RawSegmentReader + io.Closer +} + +type SegmentReader interface { + WithContext(ctx context.Context) ClosableRawSegmentReader + + MetadataContainer +} + +// In the future, one can implement a WrapSegment function that is of +// the following form: +// WrapSegment(name string, fn WrapSegmentFunc) SegmentReader +// where WrapSegmentFunc is func(underlying ClosableRawSegmentReader) RawSegmentReader +// This allows chaining simple composite SegmentReaders + +type Writer interface { + WithContext(ctx context.Context) io.WriteCloser + + // This writer supports registering metadata about the content it + // is writing and the destination it is writing to. 
+ MetadataContainer + + Wrap(fn WrapWriterFunc) Writer +} + +type readerInternal interface { + Reader + RawReader() io.Reader + RawCloser() io.Closer +} + +type segmentReaderInternal interface { + SegmentReader + RawSegmentReader() RawSegmentReader + RawCloser() io.Closer +} + +type writerInternal interface { + Writer + RawWriter() io.Writer + RawCloser() io.Closer +} + +// The internal implementation structs should implement the +// ...Internal interfaces, in order to expose their raw, underlying resources +// just in case it is _really_ needed upstream (e.g. for testing). It is not +// exposed by default in the interface to avoid showing up in Godoc, as it +// most often shouldn't be used. +var _ readerInternal = &reader{} +var _ segmentReaderInternal = &segmentReader{} +var _ writerInternal = &writer{} diff --git a/pkg/content/metadata.go b/pkg/content/metadata.go new file mode 100644 index 00000000..a17f3f16 --- /dev/null +++ b/pkg/content/metadata.go @@ -0,0 +1,101 @@ +package content + +import ( + "encoding/json" + "net/textproto" + "net/url" + + "github.com/weaveworks/libgitops/pkg/content/metadata" +) + +// Metadata is the interface that's common to contentMetadataOptions and a wrapper +// around a HTTP request. +type Metadata interface { + metadata.Header + metadata.HeaderOption + + // Apply applies the given Options to itself and returns itself, without + // any deep-copying. + Apply(opts ...metadata.HeaderOption) Metadata + // ContentLength retrieves the standard "Content-Length" header + ContentLength() (int64, bool) + // ContentType retrieves the standard "Content-Type" header + ContentType() (ContentType, bool) + // ContentLocation retrieves the custom "X-Content-Location" header + ContentLocation() (*url.URL, bool) + + // Clone makes a deep copy of the Metadata + // TODO: Do we need this anymore? 
+ Clone() Metadata + + ToContainer() MetadataContainer +} + +var _ Metadata = contentMetadata{} + +var _ json.Marshaler = contentMetadata{} + +func (m contentMetadata) MarshalJSON() ([]byte, error) { + return json.Marshal(m.MIMEHeader) +} + +func (m contentMetadata) ApplyToHeader(target metadata.Header) { + for k, vals := range m.MIMEHeader { + for i, val := range vals { + if i == 0 { + target.Set(k, val) + } else { + target.Add(k, val) + } + } + } +} + +func (m contentMetadata) Apply(opts ...metadata.HeaderOption) Metadata { + for _, opt := range opts { + opt.ApplyToHeader(m) + } + return m +} + +func (m contentMetadata) ContentLength() (int64, bool) { + return metadata.GetInt64(m, metadata.ContentLengthKey) +} + +func (m contentMetadata) ContentType() (ContentType, bool) { + ct, ok := metadata.GetString(m, metadata.ContentTypeKey) + return ContentType(ct), ok +} + +func (m contentMetadata) ContentLocation() (*url.URL, bool) { + return metadata.GetURL(m, metadata.XContentLocationKey) +} + +func (m contentMetadata) ToContainer() MetadataContainer { + return &metadataContainer{m} +} + +func (m contentMetadata) Clone() Metadata { + m2 := make(textproto.MIMEHeader, len(m.MIMEHeader)) + for k, v := range m.MIMEHeader { + m2[k] = v + } + return contentMetadata{m2} +} + +type MetadataContainer interface { + // ContentMetadata + ContentMetadata() Metadata +} + +func NewMetadata(opts ...metadata.HeaderOption) Metadata { + return contentMetadata{MIMEHeader: textproto.MIMEHeader{}}.Apply(opts...) 
+} + +type contentMetadata struct { + textproto.MIMEHeader +} + +type metadataContainer struct{ m Metadata } + +func (b *metadataContainer) ContentMetadata() Metadata { return b.m } diff --git a/pkg/content/metadata/metadata.go b/pkg/content/metadata/metadata.go new file mode 100644 index 00000000..062a2b7b --- /dev/null +++ b/pkg/content/metadata/metadata.go @@ -0,0 +1,157 @@ +package metadata + +import ( + "mime" + "net/textproto" + "net/url" + "strconv" + "strings" +) + +/* + Metadata origin: + + content.FromFile -> content.Reader + - X-Content-Location + - Content-Length + + content.FromBytes -> content.Reader + - Content-Length + + content.FromString -> content.Reader + - Content-Length + + content.ToFile -> content.Writer + - X-Content-Location + + content.ToEmptyBuffer -> content.Writer + + frame.newYAMLReader -> frame.Reader + - Content-Type => YAML + + frame.newJSONReader -> frame.Reader + - Content-Type => JSON + + frame.newRecognizingReader -> frame.Reader + - If Content-Type is set, try use FramingType == ContentType + - If X-Content-Location is set, try deduce ContentType from that + - Peek the buffer, and check if JSON + +*/ + +//func NewMetadataContainer(m Metadata) MetadataContainer { return &MetadataContainer{m} } + +const ( + XContentLocationKey = "X-Content-Location" + //XFramingTypeKey = "X-Framing-Type" + + ContentLengthKey = "Content-Length" + ContentTypeKey = "Content-Type" + AcceptKey = "Accept" +) + +type HeaderOption interface { + // Rename to ApplyMetadataHeader? 
+ ApplyToHeader(target Header) +} + +/*func NewContentTypeOption(ct ContentType) setHeaderOption { + return setHeaderOption{Key: ContentLengthKey, Value: ct.String()} +}*/ + +var _ HeaderOption = setHeaderOption{} + +func SetOption(k, v string) HeaderOption { + return setHeaderOption{Key: k, Value: v} +} + +func WithContentLength(len int64) HeaderOption { + return SetOption(ContentLengthKey, strconv.FormatInt(len, 10)) +} + +func WithContentLocation(loc string) HeaderOption { + return SetOption(XContentLocationKey, loc) +} + +func WithAccept(accepts ...string) HeaderOption { + return addHeaderOption{Key: AcceptKey, Values: accepts} +} + +type setHeaderOption struct{ Key, Value string } + +func (o setHeaderOption) ApplyToHeader(target Header) { + target.Set(o.Key, o.Value) +} + +type addHeaderOption struct { + Key string + Values []string +} + +func (o addHeaderOption) ApplyToHeader(target Header) { + for _, val := range o.Values { + target.Add(o.Key, val) + } +} + +// Make sure the interface is compatible with the targeted textproto.MIMEHeader +var _ Header = textproto.MIMEHeader{} + +// Express the string-string map interface of the net/textproto.Header map +type Header interface { + Add(key, value string) + Set(key, value string) + Get(key string) string + Values(key string) []string + Del(key string) +} + +// TODO: Public or private? 
+ +func GetString(m Header, key string) (string, bool) { + if len(m.Values(key)) == 0 { + return "", false + } + return m.Get(key), true +} + +func GetInt64(m Header, key string) (int64, bool) { + i, err := strconv.ParseInt(m.Get(key), 10, 64) + if err != nil { + return 0, false + } + return i, true +} + +func GetURL(m Header, key string) (*url.URL, bool) { + str, ok := GetString(m, key) + if !ok { + return nil, false + } + u, err := url.Parse(str) + if err != nil { + return nil, false + } + return u, true +} + +func GetMediaTypes(m Header, key string) (mediaTypes []string, err error) { + for _, commaSepVal := range m.Values(key) { + for _, mediaTypeStr := range strings.Split(commaSepVal, ",") { + mediaType, _, err := mime.ParseMediaType(mediaTypeStr) + if err != nil { + return nil, err + } + mediaTypes = append(mediaTypes, mediaType) + } + } + return +} + +/* + Content-Encoding + Content-Length + Content-Type + Last-Modified + ETag +*/ diff --git a/pkg/content/metadata/metadata_test.go b/pkg/content/metadata/metadata_test.go new file mode 100644 index 00000000..8d2a77f1 --- /dev/null +++ b/pkg/content/metadata/metadata_test.go @@ -0,0 +1,125 @@ +package metadata + +import ( + "bufio" + "bytes" + "fmt" + "mime" + "net/textproto" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "sigs.k8s.io/kustomize/kyaml/kio" + "sigs.k8s.io/yaml" +) + +func TestMIME(t *testing.T) { + for _, part := range strings.Split("text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8", ",") { + t.Error(mime.ParseMediaType(part)) + } +} + +func TestTypePrint(t *testing.T) { + t.Error(fmt.Printf("%T\n", bytes.NewBuffer(nil))) + t.Error(fmt.Printf("%T\n", json.Framer.NewFrameReader(nil))) +} + +func TestK8sYAML(t *testing.T) { + c := []byte("\n---\n\n---\n f : fo\n\n---\n \n---\nbar: true") //[]byte("\n---\nfoo:\n- bar: true") + + /*var obj interface{} + b, err := yaml.YAMLToJSON(c) + 
t.Error(string(b), err) + err = yaml.Unmarshal(c, &obj)*/ + /*for _, subobj := range obj.([]interface{}) { + t.Error(subobj.(map[string]interface{})) + }*/ + //t.Error(obj, err) + /*n := goyaml.Node{} + err = goyaml.Unmarshal(c, &n) + nb, err2 := goyaml.Marshal(n) + t.Error(string(nb), err, err2)*/ + rn, err := kio.FromBytes(c) + for _, n := range rn { + t.Error(n.MustString()) + } + t.Error(err) +} + +func TestBufio(t *testing.T) { + r := strings.NewReader("foo: bar") + br := bufio.NewReaderSize(r, 2048) + c, err := br.Peek(2048) + t.Error(string(c), err) +} + +const fooYAML = ` + +--- + +--- +baz: 123 +foo: bar +bar: true +--- +foo: bar +bar: true + +` + +func TestFoo(t *testing.T) { + //u, err := url.Parse("file:///foo/bar") + /*u := &url.URL{ + //Scheme: "file", + Path: ".", + } + t.Error(u, nil, u.RequestURI(), u.Host, u.Scheme)*/ + + obj := map[string]interface{}{} + + err := yaml.UnmarshalStrict([]byte(fooYAML), &obj) + t.Errorf("%+v %v", obj, err) +} + +func TestGetMediaTypes(t *testing.T) { + tests := []struct { + name string + opts []HeaderOption + key string + wantMediaTypes []string + wantErr error + }{ + { + name: "multiple keys, and values in one key", + opts: []HeaderOption{ + WithAccept("application/yaml", "application/xml"), + WithAccept("application/json"), + WithAccept("text/html, application/xhtml+xml, application/xml;q=0.9, image/webp, */*;q=0.8"), + }, + key: AcceptKey, + wantMediaTypes: []string{ + "application/yaml", + "application/xml", + "application/json", + "text/html", + "application/xhtml+xml", + "application/xml", + "image/webp", + "*/*", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + h := textproto.MIMEHeader{} + for _, opt := range tt.opts { + opt.ApplyToHeader(h) + } + gotMediaTypes, err := GetMediaTypes(h, tt.key) + assert.Equal(t, tt.wantMediaTypes, gotMediaTypes) + assert.ErrorIs(t, err, tt.wantErr) + }) + } +} diff --git a/pkg/content/reader.go b/pkg/content/reader.go new file mode 100644 
index 00000000..e417096b --- /dev/null +++ b/pkg/content/reader.go @@ -0,0 +1,244 @@ +package content + +import ( + "context" + "errors" + "io" + "os" + + "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/compositeio" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "go.opentelemetry.io/otel/trace" +) + +type contextLock interface { + setContext(ctx context.Context) + clearContext() +} + +type contextLockImpl struct { + ctx context.Context +} + +func (l *contextLockImpl) setContext(ctx context.Context) { l.ctx = ctx } +func (l *contextLockImpl) clearContext() { l.ctx = nil } + +type readContextLockImpl struct { + contextLockImpl + r io.Reader + metaGetter MetadataContainer + underlyingLock contextLock +} + +func (r *readContextLockImpl) Read(p []byte) (n int, err error) { + ft := tracing.FromContext(r.ctx, r.r) + err = ft.TraceFunc(r.ctx, "Read", func(ctx context.Context, span trace.Span) error { + var tmperr error + if r.underlyingLock != nil { + r.underlyingLock.setContext(ctx) + } + n, tmperr = r.r.Read(p) + if r.underlyingLock != nil { + r.underlyingLock.clearContext() + } + // Register metadata in the span + span.SetAttributes(SpanAttrByteContentCap(p[:n], len(p))...) 
+ return tmperr + }, trace.WithAttributes(SpanAttrContentMetadata(r.metaGetter.ContentMetadata()))).RegisterCustom(SpanRegisterReadError) + return +} + +type closeContextLockImpl struct { + contextLockImpl + c io.Closer + metaGetter MetadataContainer + underlyingLock contextLock +} + +func (c *closeContextLockImpl) Close() error { + spanName := "Close" + if c.c == nil { + spanName = "CloseNoop" + } + + ft := tracing.FromContext(c.ctx, c.c) + return ft.TraceFunc(c.ctx, spanName, func(ctx context.Context, _ trace.Span) error { + // Don't close if c.c is nil + if c.c == nil { + return nil + } + + if c.underlyingLock != nil { + c.underlyingLock.setContext(ctx) + } + // Close the underlying resource + err := c.c.Close() + if c.underlyingLock != nil { + c.underlyingLock.clearContext() + } + return err + }, trace.WithAttributes(SpanAttrContentMetadata(c.metaGetter.ContentMetadata()))).Register() +} + +type reader struct { + MetadataContainer + read *readContextLockImpl + close *closeContextLockImpl +} + +type readerWithContext struct { + read *readContextLockImpl + ctx context.Context +} + +func (r *readerWithContext) Read(p []byte) (n int, err error) { + r.read.setContext(r.ctx) + n, err = r.read.Read(p) + r.read.clearContext() + return +} + +type closerWithContext struct { + close *closeContextLockImpl + ctx context.Context +} + +func (r *closerWithContext) Close() error { + r.close.setContext(r.ctx) + err := r.close.Close() + r.close.clearContext() + return err +} + +func (r *reader) WithContext(ctx context.Context) io.ReadCloser { + return compositeio.ReadCloser(&readerWithContext{r.read, ctx}, &closerWithContext{r.close, ctx}) +} +func (r *reader) RawReader() io.Reader { return r.read.r } +func (r *reader) RawCloser() io.Closer { return r.close.c } + +// Maybe allow adding extra attributes at the end? 
+func (r *reader) Wrap(wrapFn WrapReaderFunc) Reader { + newReader := wrapFn(compositeio.ReadCloser(r.read, r.close)) + if newReader == nil { + panic("newReader must not be nil") + } + // If an io.Closer is not returned, close this + // Reader's stream instead. Importantly enough, + // a trace will be registered for both this + // Reader, and the returned one. + newCloser, ok := newReader.(io.Closer) + if !ok { + newCloser = r.close + } + + mb := r.ContentMetadata().Clone().ToContainer() + + return &reader{ + MetadataContainer: mb, + read: &readContextLockImpl{ + r: newReader, + metaGetter: mb, + underlyingLock: r.read, + }, + close: &closeContextLockImpl{ + c: newCloser, + metaGetter: mb, + underlyingLock: r.close, + }, + } +} + +func (r *reader) WrapSegment(wrapFn WrapReaderToSegmentFunc) SegmentReader { + newSegmentReader := wrapFn(compositeio.ReadCloser(r.read, r.close)) + if newSegmentReader == nil { + panic("newSegmentReader must not be nil") + } + + // If an io.Closer is not returned, close this + // Reader's stream instead. Importantly enough, + // a trace will be registered for both this + // Reader, and the returned one. 
+ newCloser, ok := newSegmentReader.(io.Closer) + if !ok { + newCloser = r.close + } + + mb := r.ContentMetadata().Clone().ToContainer() + + return &segmentReader{ + MetadataContainer: mb, + read: &readSegmentContextLockImpl{ + r: newSegmentReader, + metaGetter: mb, + underlyingLock: r.read, + }, + close: &closeContextLockImpl{ + c: newCloser, + metaGetter: mb, + underlyingLock: r.close, + }, + } +} + +func NewReader(r io.Reader, opts ...metadata.HeaderOption) Reader { + // If it already is a Reader, just return it + rr, ok := r.(Reader) + if ok { + return rr + } + + // Use the closer if available + c, _ := r.(io.Closer) + // Never close stdio + if isStdio(r) { + c = nil + } + mb := NewMetadata(opts...).ToContainer() + + return &reader{ + MetadataContainer: mb, + read: &readContextLockImpl{ + r: r, + metaGetter: mb, + // underlyingLock is nil + }, + close: &closeContextLockImpl{ + c: c, + metaGetter: mb, + // underlyingLock is nil + }, + } +} + +func isStdio(s interface{}) bool { + f, ok := s.(*os.File) + if !ok { + return false + } + return int(f.Fd()) < 3 +} + +// SpanRegisterReadError registers io.EOF as an "event", and other errors as "unknown errors" in the trace +func SpanRegisterReadError(span trace.Span, err error) { + // Register the error with the span. 
EOF is expected at some point, + // hence, register that as an event instead of an error + if errors.Is(err, io.EOF) { + span.AddEvent("EOF") + } else if err != nil { + span.RecordError(err) + } +} + +type ResetCounterFunc func() + +func WrapLimited(r Reader, maxFrameSize limitedio.Limit) (Reader, ResetCounterFunc) { + var reset ResetCounterFunc + limitedR := r.Wrap(func(underlying io.ReadCloser) io.Reader { + lr := limitedio.NewReader(underlying, maxFrameSize) + reset = lr.ResetCounter + return lr + }) + return limitedR, reset +} diff --git a/pkg/content/reader_test.go b/pkg/content/reader_test.go new file mode 100644 index 00000000..98b6aea3 --- /dev/null +++ b/pkg/content/reader_test.go @@ -0,0 +1,62 @@ +package content + +import ( + "bytes" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func Test_isStdio(t *testing.T) { + tmp := t.TempDir() + f, err := os.Create(filepath.Join(tmp, "foo.txt")) + require.Nil(t, err) + defer f.Close() + tests := []struct { + name string + in interface{} + want bool + }{ + { + name: "os.Stdin", + in: os.Stdin, + want: true, + }, + { + name: "os.Stdout", + in: os.Stdout, + want: true, + }, + { + name: "os.Stderr", + in: os.Stderr, + want: true, + }, + { + name: "*bytes.Buffer", + in: bytes.NewBufferString("FooBar"), + }, + { + name: "*strings.Reader", + in: strings.NewReader("FooBar"), + }, + { + name: "*strings.Reader", + in: strings.NewReader("FooBar"), + }, + { + name: "*os.File", + in: f, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := isStdio(tt.in) + assert.Equal(t, got, tt.want) + }) + } +} diff --git a/pkg/content/recognizing.go b/pkg/content/recognizing.go new file mode 100644 index 00000000..ed3d198b --- /dev/null +++ b/pkg/content/recognizing.go @@ -0,0 +1,203 @@ +package content + +import ( + "bufio" + "bytes" + "context" + "errors" + "io" + "path/filepath" + + 
"github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/compositeio" + "go.opentelemetry.io/otel/trace" + yamlutil "k8s.io/apimachinery/pkg/util/yaml" + "sigs.k8s.io/yaml" +) + +const peekSize = 2048 + +type ContentTypeRecognizer interface { + FromContentMetadata(m Metadata) (ct ContentType, ok bool) + FromPeekBytes(peek []byte) (ct ContentType, ok bool) + + // SupportedContentTypes() tells about what ContentTypes are supported by this recognizer + ContentTypeSupporter +} + +func NewJSONYAMLRecognizingReader(ctx context.Context, r Reader) (Reader, ContentType, error) { + return NewRecognizingReader(ctx, r, NewJSONYAMLContentTypeRecognizer()) +} + +func NewRecognizingReader(ctx context.Context, r Reader, ctrec ContentTypeRecognizer) (Reader, ContentType, error) { + // If r already has Content-Type set, all good + meta := r.ContentMetadata() + ct, ok := meta.ContentType() + if ok { + return r, ct, nil + } + + // Try to resolve the Content-Type from the X-Content-Location header + ct, ok = ctrec.FromContentMetadata(meta) + if ok { + meta.Apply(WithContentType(ct)) + return r, ct, nil + } + + var newr Reader + err := tracing.FromContext(ctx, "content").TraceFunc(ctx, "NewRecognizingReader", + func(ctx context.Context, span trace.Span) error { + + // Use the context to access the io.ReadCloser + rc := r.WithContext(ctx) + meta := r.ContentMetadata().Clone() + + bufr := bufio.NewReaderSize(rc, peekSize) + + peek, err := bufr.Peek(peekSize) + if err != nil && !errors.Is(err, io.EOF) { + return err + } + + // Write to ct defined earlier, that value will be returned if err == nil + ct, ok = ctrec.FromPeekBytes(peek) + if !ok { + // TODO: Struct error; include the peek in the context too + return errors.New("couldn't recognize content type") + } + + // Set the right recognized content type + meta.Apply(WithContentType(ct)) + + // Read from the buffered bufio.Reader, because we have 
already peeked + // data from the underlying rc. Close rc when done. + newr = NewReader(compositeio.ReadCloser(bufr, rc), meta) + return nil + }).Register() + if err != nil { + return nil, "", err + } + + return newr, ct, nil +} + +func NewRecognizingWriter(w Writer, ctrec ContentTypeRecognizer) (Writer, ContentType, error) { + // If r already has Content-Type set, all good + meta := w.ContentMetadata() + ct, ok := meta.ContentType() + if ok { + return w, ct, nil + } + + // Try to resolve the Content-Type from the X-Content-Location header + ct, ok = ctrec.FromContentMetadata(meta) + if ok { + meta.Apply(WithContentType(ct)) + return w, ct, nil + } + + // Negotiate the Accept header + ct, ok = negotiateAccept(meta, ctrec.SupportedContentTypes()) + if ok { + meta.Apply(WithContentType(ct)) + return w, ct, nil + } + + return nil, "", errors.New("couldn't recognize content type") +} + +const acceptAll ContentType = "*/*" + +func negotiateAccept(meta Metadata, supportedTypes []ContentType) (ContentType, bool) { + accepts, err := metadata.GetMediaTypes(meta, metadata.AcceptKey) + if err != nil { + return "", false + } + + // prioritize the order that the metadata is asking for. 
supported is in priority order too + for _, accept := range accepts { + for _, supported := range supportedTypes { + if matchesAccept(ContentType(accept), supported) { + return supported, true + } + } + } + return "", false +} + +func matchesAccept(accept, supported ContentType) bool { + if accept == acceptAll { + return true + } + return accept == supported +} + +func NewJSONYAMLContentTypeRecognizer() ContentTypeRecognizer { + return jsonYAMLContentTypeRecognizer{} +} + +type jsonYAMLContentTypeRecognizer struct { +} + +var defaultExtMap = map[string]ContentType{ + ".json": ContentTypeJSON, + ".yml": ContentTypeYAML, + ".yaml": ContentTypeYAML, +} + +func (jsonYAMLContentTypeRecognizer) FromContentMetadata(m Metadata) (ContentType, bool) { + loc, ok := metadata.GetString(m, metadata.XContentLocationKey) + if !ok { + return "", false + } + ext := filepath.Ext(loc) + ct, ok := defaultExtMap[ext] + if !ok { + return "", false + } + return ct, true +} + +func (jsonYAMLContentTypeRecognizer) FromPeekBytes(peek []byte) (ContentType, bool) { + // Check if this is JSON or YAML + if yamlutil.IsJSONBuffer(peek) { + return ContentTypeJSON, true + } else if isYAML(peek) { + return ContentTypeYAML, true + } + return "", false +} + +func (jsonYAMLContentTypeRecognizer) SupportedContentTypes() ContentTypes { + return []ContentType{ContentTypeJSON, ContentTypeYAML} +} + +func isYAML(peek []byte) bool { + line, err := getLine(peek) + if err != nil { + return false + } + + o := map[string]interface{}{} + err = yaml.Unmarshal(line, &o) + return err == nil +} + +func getLine(peek []byte) ([]byte, error) { + s := bufio.NewScanner(bytes.NewReader(peek)) + // TODO: Support very long lines? (over 65k bytes?) 
Probably not + for s.Scan() { + t := bytes.TrimSpace(s.Bytes()) + // TODO: Ignore comments + if len(t) == 0 || bytes.Equal(t, []byte("---")) { + continue + } + return t, nil + } + // Return a possible scanning error + if err := s.Err(); err != nil { + return nil, err + } + return nil, errors.New("couldn't find non-empty line in scanner") +} diff --git a/pkg/content/recognizing_reader_test.go b/pkg/content/recognizing_reader_test.go new file mode 100644 index 00000000..804f237a --- /dev/null +++ b/pkg/content/recognizing_reader_test.go @@ -0,0 +1,96 @@ +package content + +import ( + "bufio" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +func Test_isYAML(t *testing.T) { + tests := []struct { + name string + peek string + want bool + }{ + { + name: "field mapping", + peek: "foo: bar\n", + want: true, + }, + { + name: "spaces and other empty documents", + peek: `--- + + +--- +--- +foo: bar`, + want: true, + }, + { + name: "bool", + peek: "foo: true", + want: true, + }, + { + name: "int", + peek: "foo: 5", + want: true, + }, + { + name: "float", + peek: "foo: 5.1", + want: true, + }, + { + name: "float", + peek: "foo: null", + want: true, + }, + { + name: "beginning of struct", + peek: "foo:", + want: true, + }, + { + name: "scalar null", + peek: `null`, + want: true, + }, + { + name: "nothing", + }, + { + name: "line overflow", + peek: strings.Repeat("a", bufio.MaxScanTokenSize) + ": true", + }, + + { + name: "list element struct", + peek: "- foo: bar", + }, + { + name: "list element string", + peek: "- foo", + }, + { + name: "scalar string", + peek: `foo`, + }, + { + name: "scalar int", + peek: `5`, + }, + { + name: "scalar float", + peek: `5.1`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assert.Equal(t, isYAML([]byte(tt.peek)), tt.want) + }) + } +} diff --git a/pkg/content/recognizing_test.go b/pkg/content/recognizing_test.go new file mode 100644 index 00000000..0350a6c7 --- /dev/null +++ 
b/pkg/content/recognizing_test.go @@ -0,0 +1,69 @@ +package content + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaveworks/libgitops/pkg/content/metadata" +) + +func Test_negotiateAccept(t *testing.T) { + tests := []struct { + name string + accepts []string + supported []ContentType + want ContentType + wantOk bool + }{ + { + name: "accepts has higher priority than supported", + // application/bar is not supported, but the second highest priority does + accepts: []string{"application/bar", "application/json", "application/yaml"}, + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + want: "application/json", + wantOk: true, + }, + { + name: "no accepts should give empty result", + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + }, + { + name: "no supported should give empty result", + accepts: []string{"application/bar", "application/json", "application/yaml"}, + }, + { + name: "invalid accept should give empty result", + accepts: []string{"///;;app/bar", "application/json", "application/yaml"}, + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + }, + { + name: "ignore extra parameters, e.g. 
q=0.8", + accepts: []string{"application/bar", "application/json;q=0.8", "application/yaml"}, + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + want: "application/json", + wantOk: true, + }, + { + name: "allow comma separation", + accepts: []string{"application/bar, application/json;q=0.8", "application/yaml"}, + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + want: "application/json", + wantOk: true, + }, + { + name: "accept all; choose the preferred one", + accepts: []string{"application/bar, */*;q=0.7", "application/yaml"}, + supported: []ContentType{"application/foo", "application/yaml", "application/json"}, + want: "application/foo", + wantOk: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := NewMetadata(metadata.WithAccept(tt.accepts...)) + got, gotOk := negotiateAccept(m, tt.supported) + assert.Equal(t, tt.want, got) + assert.Equal(t, tt.wantOk, gotOk) + }) + } +} diff --git a/pkg/content/segment_reader.go b/pkg/content/segment_reader.go new file mode 100644 index 00000000..62f408ce --- /dev/null +++ b/pkg/content/segment_reader.go @@ -0,0 +1,63 @@ +package content + +import ( + "context" + "io" + + "github.com/weaveworks/libgitops/pkg/tracing" + "go.opentelemetry.io/otel/trace" +) + +type segmentReader struct { + MetadataContainer + read *readSegmentContextLockImpl + close *closeContextLockImpl +} + +func (r *segmentReader) WithContext(ctx context.Context) ClosableRawSegmentReader { + return closableRawSegmentReader{&segmentReaderWithContext{r.read, ctx}, &closerWithContext{r.close, ctx}} +} + +func (r *segmentReader) RawSegmentReader() RawSegmentReader { return r.read.r } +func (r *segmentReader) RawCloser() io.Closer { return r.close.c } + +type segmentReaderWithContext struct { + read *readSegmentContextLockImpl + ctx context.Context +} + +func (r *segmentReaderWithContext) Read() (content []byte, err error) { + r.read.setContext(r.ctx) + 
content, err = r.read.Read() + r.read.clearContext() + return +} + +type readSegmentContextLockImpl struct { + contextLockImpl + r RawSegmentReader + metaGetter MetadataContainer + underlyingLock contextLock +} + +func (r *readSegmentContextLockImpl) Read() (content []byte, err error) { + ft := tracing.FromContext(r.ctx, r.r) + err = ft.TraceFunc(r.ctx, "ReadSegment", func(ctx context.Context, span trace.Span) error { + var tmperr error + if r.underlyingLock != nil { + r.underlyingLock.setContext(ctx) + } + content, tmperr = r.r.Read() + if r.underlyingLock != nil { + r.underlyingLock.clearContext() + } + span.SetAttributes(SpanAttrByteContent(content)...) + return tmperr + }, trace.WithAttributes(SpanAttrContentMetadata(r.metaGetter.ContentMetadata()))).RegisterCustom(SpanRegisterReadError) + return +} + +type closableRawSegmentReader struct { + RawSegmentReader + io.Closer +} diff --git a/pkg/content/tracing.go b/pkg/content/tracing.go new file mode 100644 index 00000000..f11eec83 --- /dev/null +++ b/pkg/content/tracing.go @@ -0,0 +1,33 @@ +package content + +import "go.opentelemetry.io/otel/attribute" + +const ( + SpanAttributeKeyByteContent = "byteContent" + SpanAttributeKeyByteContentLen = "byteContentLength" + SpanAttributeKeyByteContentCap = "byteContentCapacity" + SpanAttributeKeyContentMetadata = "contentMetadata" +) + +// SpanAttrByteContent registers byteContent and byteContentLength span attributes +// b should be the byte content that has been e.g. read or written in an io operation +func SpanAttrByteContent(b []byte) []attribute.KeyValue { + return []attribute.KeyValue{ + attribute.String(SpanAttributeKeyByteContent, string(b)), + attribute.Int64(SpanAttributeKeyByteContentLen, int64(len(b))), + } +} + +// SpanAttrByteContentCap extends SpanAttrByteContent with a capacity argument +// cap should be the capacity of e.g. that read or write, i.e. how much +// could have been read or written. 
+func SpanAttrByteContentCap(b []byte, cap int) []attribute.KeyValue { + return append(SpanAttrByteContent(b), + attribute.Int(SpanAttributeKeyByteContentCap, cap), + ) +} + +// TODO: This should be used upstream, too, or not? +func SpanAttrContentMetadata(m Metadata) attribute.KeyValue { + return attribute.Any(SpanAttributeKeyContentMetadata, m) +} diff --git a/pkg/content/writer.go b/pkg/content/writer.go new file mode 100644 index 00000000..167346ae --- /dev/null +++ b/pkg/content/writer.go @@ -0,0 +1,121 @@ +package content + +import ( + "context" + "io" + + "github.com/weaveworks/libgitops/pkg/content/metadata" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/compositeio" + "go.opentelemetry.io/otel/trace" +) + +func NewWriter(w io.Writer, opts ...metadata.HeaderOption) Writer { + // If it already is a Writer, just return it + ww, ok := w.(Writer) + if ok { + return ww + } + + // Use the closer if available + c, _ := w.(io.Closer) + // Never close stdio + if isStdio(w) { + c = nil + } + mb := NewMetadata(opts...).ToContainer() + + return &writer{ + MetadataContainer: mb, + write: &writeContextLockImpl{ + w: w, + metaGetter: mb, + // underlyingLock is nil + }, + close: &closeContextLockImpl{ + c: c, + metaGetter: mb, + // underlyingLock is nil + }, + } +} + +type writer struct { + MetadataContainer + write *writeContextLockImpl + close *closeContextLockImpl +} + +func (w *writer) WithContext(ctx context.Context) io.WriteCloser { + return compositeio.WriteCloser(&writerWithContext{w.write, ctx}, &closerWithContext{w.close, ctx}) +} +func (w *writer) RawWriter() io.Writer { return w.write.w } +func (w *writer) RawCloser() io.Closer { return w.close.c } + +func (w *writer) Wrap(wrapFn WrapWriterFunc) Writer { + newWriter := wrapFn(compositeio.WriteCloser(w.write, w.close)) + if newWriter == nil { + panic("newWriter must not be nil") + } + // If an io.Closer is not returned, close this + // Reader's stream instead. 
Importantly enough, + // a trace will be registered for both this + // Reader, and the returned one. + newCloser, ok := newWriter.(io.Closer) + if !ok { + newCloser = w.close + } + + mb := w.ContentMetadata().Clone().ToContainer() + + return &writer{ + MetadataContainer: mb, + write: &writeContextLockImpl{ + w: newWriter, + metaGetter: mb, + underlyingLock: w.write, + }, + close: &closeContextLockImpl{ + c: newCloser, + metaGetter: mb, + underlyingLock: w.close, + }, + } +} + +type writerWithContext struct { + write *writeContextLockImpl + ctx context.Context +} + +func (w *writerWithContext) Write(p []byte) (n int, err error) { + w.write.setContext(w.ctx) + n, err = w.write.Write(p) + w.write.clearContext() + return +} + +type writeContextLockImpl struct { + contextLockImpl + w io.Writer + metaGetter MetadataContainer + underlyingLock contextLock +} + +func (r *writeContextLockImpl) Write(p []byte) (n int, err error) { + ft := tracing.FromContext(r.ctx, r.w) + err = ft.TraceFunc(r.ctx, "Write", func(ctx context.Context, span trace.Span) error { + var tmperr error + if r.underlyingLock != nil { + r.underlyingLock.setContext(ctx) + } + n, tmperr = r.w.Write(p) + if r.underlyingLock != nil { + r.underlyingLock.clearContext() + } + // Register metadata in the span + span.SetAttributes(SpanAttrByteContentCap(p[:n], len(p))...) + return tmperr + }, trace.WithAttributes(SpanAttrContentMetadata(r.metaGetter.ContentMetadata()))).Register() + return +} diff --git a/pkg/frame/constructors.go b/pkg/frame/constructors.go new file mode 100644 index 00000000..6e8ebe49 --- /dev/null +++ b/pkg/frame/constructors.go @@ -0,0 +1,104 @@ +package frame + +import ( + "bytes" + "context" + + "github.com/weaveworks/libgitops/pkg/content" +) + +// 2 generic Reader constructors + +func NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader { + return internalFactoryVar.NewSingleReader(ct, r, opts...) 
+} + +func NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader { + return internalFactoryVar.NewRecognizingReader(ctx, r, opts...) +} + +// 4 JSON-YAML Reader constructors using the default factory + +func NewYAMLReader(r content.Reader, opts ...ReaderOption) Reader { + return internalFactoryVar.NewReader(content.ContentTypeYAML, r, opts...) +} + +func NewJSONReader(r content.Reader, opts ...ReaderOption) Reader { + return internalFactoryVar.NewReader(content.ContentTypeJSON, r, opts...) +} + +func NewSingleYAMLReader(r content.Reader, opts ...SingleReaderOption) Reader { + return NewSingleReader(content.ContentTypeYAML, r, opts...) +} + +func NewSingleJSONReader(r content.Reader, opts ...SingleReaderOption) Reader { + return NewSingleReader(content.ContentTypeJSON, r, opts...) +} + +// 2 generic Writer constructors + +func NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer { + return internalFactoryVar.NewSingleWriter(ct, w, opts...) +} + +func NewRecognizingWriter(r content.Writer, opts ...RecognizingWriterOption) Writer { + return internalFactoryVar.NewRecognizingWriter(r, opts...) +} + +// 4 JSON-YAML Writer constructors using the default factory + +func NewYAMLWriter(r content.Writer, opts ...WriterOption) Writer { + return internalFactoryVar.NewWriter(content.ContentTypeYAML, r, opts...) +} + +func NewJSONWriter(r content.Writer, opts ...WriterOption) Writer { + return internalFactoryVar.NewWriter(content.ContentTypeJSON, r, opts...) +} + +func NewSingleYAMLWriter(r content.Writer, opts ...SingleWriterOption) Writer { + return internalFactoryVar.NewSingleWriter(content.ContentTypeYAML, r, opts...) +} + +func NewSingleJSONWriter(r content.Writer, opts ...SingleWriterOption) Writer { + return internalFactoryVar.NewSingleWriter(content.ContentTypeJSON, r, opts...) 
+}
+
+// 1 single, 3 YAML and 1 recognizing content.Reader helper constructors
+
+/*func FromSingleBuffer(ct content.ContentType, buf *bytes.Buffer, opts ...SingleReaderOption) Reader {
+	return NewSingleReader(ct, content.FromBuffer(buf), opts...)
+}*/
+
+func FromYAMLBytes(yamlBytes []byte, opts ...ReaderOption) Reader {
+	return NewYAMLReader(content.FromBytes(yamlBytes), opts...)
+}
+
+func FromYAMLString(yamlStr string, opts ...ReaderOption) Reader {
+	return NewYAMLReader(content.FromString(yamlStr), opts...)
+}
+
+func FromYAMLFile(filePath string, opts ...ReaderOption) Reader {
+	return NewYAMLReader(content.FromFile(filePath), opts...)
+}
+
+func FromFile(ctx context.Context, filePath string, opts ...RecognizingReaderOption) Reader {
+	return NewRecognizingReader(ctx, content.FromFile(filePath), opts...)
+}
+
+// 1 single, 2 YAML and 1 recognizing content.Writer helper constructors
+
+func ToSingleBuffer(ct content.ContentType, buf *bytes.Buffer, opts ...SingleWriterOption) Writer {
+	return NewSingleWriter(ct, content.ToBuffer(buf), opts...)
+}
+
+func ToYAMLBuffer(buf *bytes.Buffer, opts ...WriterOption) Writer {
+	return NewYAMLWriter(content.ToBuffer(buf), opts...)
+}
+
+func ToYAMLFile(filePath string, opts ...WriterOption) Writer {
+	return NewYAMLWriter(content.ToFile(filePath), opts...)
+}
+
+func ToFile(filePath string, opts ...RecognizingWriterOption) Writer {
+	return NewRecognizingWriter(content.ToFile(filePath), opts...)
+}
diff --git a/pkg/frame/errors.go b/pkg/frame/errors.go
new file mode 100644
index 00000000..e4539ce1
--- /dev/null
+++ b/pkg/frame/errors.go
@@ -0,0 +1,40 @@
+package frame
+
+import (
+	"fmt"
+
+	"github.com/weaveworks/libgitops/pkg/util/limitedio"
+	"github.com/weaveworks/libgitops/pkg/util/structerr"
+)
+
+// Enforce all struct errors implementing structerr.StructError
+var _ structerr.StructError = &FrameCountOverflowError{}
+
+// FrameCountOverflowError is returned when a Reader or Writer would process more
+// frames than allowed.
+type FrameCountOverflowError struct {
+	// +optional
+	MaxFrameCount limitedio.Limit
+}
+
+// Error renders the message; a positive MaxFrameCount is included verbatim,
+// a negative one is rendered as "infinity" (most likely a programming error).
+func (e *FrameCountOverflowError) Error() string {
+	const base = "no more frames can be processed, hit maximum amount"
+	switch {
+	case e.MaxFrameCount < 0:
+		return base + ": infinity" // this is most likely a programming error
+	case e.MaxFrameCount > 0:
+		return fmt.Sprintf("%s: %d", base, e.MaxFrameCount)
+	default:
+		return base
+	}
+}
+
+// Is matches any *FrameCountOverflowError, regardless of its MaxFrameCount.
+func (e *FrameCountOverflowError) Is(target error) bool {
+	_, ok := target.(*FrameCountOverflowError)
+	return ok
+}
+
+// ErrFrameCountOverflow creates a *FrameCountOverflowError
+func ErrFrameCountOverflow(maxFrames limitedio.Limit) *FrameCountOverflowError {
+	return &FrameCountOverflowError{MaxFrameCount: maxFrames}
+}
diff --git a/pkg/frame/interfaces.go b/pkg/frame/interfaces.go
new file mode 100644
index 00000000..e6224849
--- /dev/null
+++ b/pkg/frame/interfaces.go
@@ -0,0 +1,160 @@
+package frame
+
+import (
+	"context"
+
+	"github.com/weaveworks/libgitops/pkg/content"
+)
+
+// TODO: Maybe implement/use context-aware (cancellable) io.Readers and io.Writers underneath?
+
+// Closer is like io.Closer, but with a Context passed along as well.
+type Closer interface {
+	// Close closes the underlying resource. If Close is called multiple times, the
+	// underlying io.Closer decides the behavior and return value.
If Close is called + // during a Read/Write operation, the underlying io.ReadCloser/io.WriteCloser + // decides the behavior. + Close(ctx context.Context) error +} + +// Reader is a framing type specific reader of an underlying io.Reader or io.ReadCloser. +// If an io.Reader is used, Close(ctx) is a no-op. If an io.ReadCloser is used, Close(ctx) +// will close the underlying io.ReadCloser. +// +// The Reader returns frames, as defined by the relevant framing type. +// For example, for YAML a frame represents a YAML document, while JSON is a self-framing +// format, i.e. encoded objects can be written to a stream just as +// '{ "a": "" ... }{ "b": "" ... }' and separated from there. +// +// Another way of defining a "frame" is that it MUST contain exactly one decodable object. +// This means that no empty (i.e. len(frame) == 0) frames shall be returned. Note: The decodable +// object might represent a list object (e.g. as Kubernetes' v1.List); more generally something +// decodable into a Go struct. +// +// The Reader can use as many underlying Read(p []byte) (n int, err error) calls it needs +// to the underlying io.Read(Clos)er. As long as frames can successfully be read from the underlying +// io.Read(Clos)er, len(frame) != 0 and err == nil. When io.EOF is encountered, len(frame) == 0 and +// errors.Is(err, io.EOF) == true. +// +// The Reader MUST be thread-safe, i.e. it must use the underlying io.Reader responsibly +// without causing race conditions when reading, e.g. by guarding reads with a mutual +// exclusion lock (mutex). The mutex isn't locked for closes, however. This enables e.g. closing the +// reader during a read operation, and other custom closing behaviors. +// +// The Reader MUST directly abort the read operation if the frame size exceeds +// ReadWriterOptions.MaxFrameSize, and return ErrFrameSizeOverflow. 
+// +// The Reader MUST return ErrFrameCountOverflow if the underlying Reader has returned more than +// ReadWriterOptions.MaxFrameCount successful read operations. The "total" frame limit is +// 10 * ReadWriterOptions.MaxFrameCount, which includes failed, empty and successful frames. +// Returned errors (including io.EOF) MUST be checked for equality using +// errors.Is(err, target), NOT using err == target. +// +// TODO: Say that the ContentType is assumed constant per content.Reader +// +// The Reader MAY respect cancellation signals on the context, depending on ReaderOptions. +// The Reader MAY support reporting trace spans for how long certain operations take. +type Reader interface { + // The Reader is specific to possibly multiple framing types + content.ContentTyped + + // ReadFrame reads one frame from the underlying io.Read(Clos)er. At maximum, the frame is as + // large as ReadWriterOptions.MaxFrameSize. See the documentation on the Reader interface for more + // details. + ReadFrame(ctx context.Context) ([]byte, error) + + // Exposes Metadata about the underlying io.Reader + content.MetadataContainer + + // The Reader can be closed. If an underlying io.Reader is used, this is a no-op. If an + // io.ReadCloser is used, this will close that io.ReadCloser. 
+ Closer +} + +type ReaderFactory interface { + // ct is dominant; will error if r has a conflicting content type + // ct must be one of the supported content types + NewReader(ct content.ContentType, r content.Reader, opts ...ReaderOption) Reader + // opts.MaxFrameCount is dominant, will always be set to 1 + // ct can be anything + // ct is dominant; will error if r has a conflicting content type + // Single options should not have MaxFrameCount at all, if possible + NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader + // will use the content type from r if set, otherwise infer from content metadata + // or peek bytes using the content.ContentTypeRecognizer + // should add to options for a recognizer + NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader + + //SupportedContentTypes() +} + +// Writer is a framing type specific writer to an underlying io.Writer or io.WriteCloser. +// If an io.Writer is used, Close(ctx) is a no-op. If an io.WriteCloser is used, Close(ctx) +// will close the underlying io.WriteCloser. +// +// The Writer writes frames to the underlying stream, as defined by the framing type. +// For example, for YAML a frame represents a YAML document, while JSON is a self-framing +// format, i.e. encoded objects can be written to a stream just as +// '{ "a": "" ... }{ "b": "" ... }'. +// +// Another way of defining a "frame" is that it MUST contain exactly one decodable object. +// It is valid (but not recommended) to supply empty frames to the Writer. +// +// Writer will only call the underlying io.Write(Close)r's Write(p []byte) call once. +// If n < len(frame) and err == nil, io.ErrShortWrite will be returned. This means that +// it's the underlying io.Writer's responsibility to buffer the frame data, if needed. +// +// The Writer MUST be thread-safe, i.e. it must use the underlying io.Writer responsibly +// without causing race conditions when reading, e.g. 
by guarding writes/closes with a +// mutual exclusion lock (mutex). The mutex isn't locked for closes, however. +// This enables e.g. closing the writer during a write operation, and other custom closing behaviors. +// +// The Writer MUST directly abort the write operation if the frame size exceeds ReadWriterOptions.MaxFrameSize, +// and return ErrFrameSizeOverflow. The Writer MUST ignore empty frames, where len(frame) == 0, possibly +// after sanitation. The Writer MUST return ErrFrameCountOverflow if WriteFrame has been called more than +// ReadWriterOptions.MaxFrameCount times. +// +// Returned errors MUST be checked for equality using errors.Is(err, target), NOT using err == target. +// +// The Writer MAY respect cancellation signals on the context, depending on WriterOptions. +// The Writer MAY support reporting trace spans for how long certain operations take. +// +// TODO: Say that the ContentType is assumed constant per content.Writer +type Writer interface { + // The Writer is specific to this framing type. + content.ContentTyped + // WriteFrame writes one frame to the underlying io.Write(Close)r. + // See the documentation on the Writer interface for more details. + WriteFrame(ctx context.Context, frame []byte) error + + // Exposes metadata from the underlying content.Writer + content.MetadataContainer + + // The Writer can be closed. If an underlying io.Writer is used, this is a no-op. If an + // io.WriteCloser is used, this will close that io.WriteCloser. 
+ Closer +} + +type WriterFactory interface { + // ct is dominant; will error if r has a conflicting content type + // ct must be one of the supported content types + NewWriter(ct content.ContentType, w content.Writer, opts ...WriterOption) Writer + // opts.MaxFrameCount is dominant, will always be set to 1 + // ct can be anything + // ct is dominant; will error if r has a conflicting content type + // Single options should not have MaxFrameCount at all, if possible + NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer + // will use the content type from r if set, otherwise infer from content metadata + // using the content.ContentTypeRecognizer + // should add to options for a recognizer + NewRecognizingWriter(w content.Writer, opts ...RecognizingWriterOption) Writer + + // The SupportedContentTypes() method specifies what content types are supported by the + // NewWriter + content.ContentTypeSupporter +} + +type Factory interface { + ReaderFactory + WriterFactory +} diff --git a/pkg/frame/k8s_reader_streaming.go b/pkg/frame/k8s_reader_streaming.go new file mode 100644 index 00000000..9ff21cec --- /dev/null +++ b/pkg/frame/k8s_reader_streaming.go @@ -0,0 +1,110 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file provides a means to read one whole frame from an io.ReadCloser +// returned by a k8s.io/apimachinery/pkg/runtime.Framer.NewFrameReader() +// +// This code is (temporarily) forked and derived from +// https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go +// and will be upstreamed if maintainers allow. The reason for forking this +// small piece of code is two-fold: a) This functionality is bundled within +// a runtime.Decoder, not provided as "just" some type of Reader, b) The +// upstream doesn't allow to configure the maximum frame size. + +package frame + +import ( + "fmt" + "io" + + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" +) + +// Ref: https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go#L63-L67 +func newK8sStreamingReader(rc io.ReadCloser, maxFrameSize int64) content.ClosableRawSegmentReader { + if maxFrameSize == 0 { + maxFrameSize = limitedio.DefaultMaxReadSize.Int64() + } + + return &k8sStreamingReaderImpl{ + reader: rc, + buf: make([]byte, 1024), + // CHANGE: maxBytes is configurable + maxBytes: maxFrameSize, + } +} + +// Ref: https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go#L51-L57 +type k8sStreamingReaderImpl struct { + reader io.ReadCloser + buf []byte + // CHANGE: In the original code, maxBytes was an int. int64 is more specific and flexible, however. + // TODO: Re-review this code; shall we have int or int64 here? 
+ maxBytes int64 + resetRead bool +} + +// Ref: https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/runtime/serializer/streaming/streaming.go#L75-L106 +func (d *k8sStreamingReaderImpl) Read() ([]byte, error) { + base := 0 + for { + n, err := d.reader.Read(d.buf[base:]) + if err == io.ErrShortBuffer { + if n == 0 { + return nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf)) + } + if d.resetRead { + continue + } + // double the buffer size up to maxBytes + // NOTE: This might need changing upstream eventually, it only works when + // d.maxBytes/len(d.buf) is a multiple of 2 + // CHANGE: In the original code no cast from int -> int64 was needed + bufLen := int64(len(d.buf)) + if bufLen < d.maxBytes { + base += n + // CHANGE: Instead of unconditionally doubling the buffer, double the buffer + // length only to the extent it fits within d.maxBytes. Previously, it was a + // requirement that d.maxBytes was a multiple of 1024 for this logic to work. + newBytes := len(d.buf) + if d.maxBytes < 2*bufLen { + newBytes = int(d.maxBytes - bufLen) + } + d.buf = append(d.buf, make([]byte, newBytes)...) + continue + } + // must read the rest of the frame (until we stop getting ErrShortBuffer) + d.resetRead = true + // base = 0 // CHANGE: Not needed (as pointed out by golangci-lint:ineffassign) + return nil, streaming.ErrObjectTooLarge + } + if err != nil { + return nil, err + } + if d.resetRead { + // now that we have drained the large read, continue + d.resetRead = false + continue + } + base += n + break + } + return d.buf[:base], nil +} + +func (d *k8sStreamingReaderImpl) Close() error { return d.reader.Close() } diff --git a/pkg/frame/k8s_reader_yaml.go b/pkg/frame/k8s_reader_yaml.go new file mode 100644 index 00000000..eac7c50c --- /dev/null +++ b/pkg/frame/k8s_reader_yaml.go @@ -0,0 +1,130 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file provides a means to extract one YAML frame from an io.ReadCloser +// +// This code is (temporarily) forked and derived from +// https://github.com/kubernetes/apimachinery/blob/v0.21.2/pkg/util/yaml/decoder.go#L111 +// and will be upstreamed if maintainers allow. The reason for forking this +// small piece of code is two-fold: a) The upstream doesn't allow configuring +// the maximum frame size, but hard-codes it to 5MB and b) for the first +// frame, the "---\n" prefix is returned and would otherwise be unnecessarily +// counted as frame content, when it actually is a frame separator. + +package frame + +import ( + "bufio" + "bytes" + "io" +) + +// k8sYAMLReader reads chunks of objects and returns ErrShortBuffer if +// the data is not sufficient. +type k8sYAMLReader struct { + r io.ReadCloser + scanner *bufio.Scanner + remaining []byte +} + +// newK8sYAMLReader decodes YAML documents from the provided +// stream in chunks by converting each document (as defined by +// the YAML spec) into its own chunk. io.ErrShortBuffer will be +// returned if the entire buffer could not be read to assist +// the caller in framing the chunk. 
+func newK8sYAMLReader(r io.ReadCloser, maxFrameSize int) io.ReadCloser {
+	scanner := bufio.NewScanner(r)
+	// the initial allocation for the buffer is 4k
+	buf := make([]byte, 4*1024)
+	// the maximum size used to buffer a token is maxFrameSize (upstream hard-codes this to 5M)
+	scanner.Buffer(buf, maxFrameSize)
+	scanner.Split(splitYAMLDocument)
+	return &k8sYAMLReader{
+		r:       r,
+		scanner: scanner,
+	}
+}
+
+// Read reads the previous slice into the buffer, or attempts to read
+// the next chunk.
+// TODO: switch to readline approach.
+func (d *k8sYAMLReader) Read(data []byte) (n int, err error) {
+	left := len(d.remaining)
+	if left == 0 {
+		// no leftover bytes; fetch the next chunk from the stream
+		if !d.scanner.Scan() {
+			err := d.scanner.Err()
+			if err == nil {
+				err = io.EOF
+			}
+			return 0, err
+		}
+		out := d.scanner.Bytes()
+		// TODO: This could be removed by the sanitation step; we don't have to
+		// do it here at this point.
+		out = bytes.TrimPrefix(out, []byte("---\n"))
+		d.remaining = out
+		left = len(out)
+	}
+
+	// the whole remaining chunk fits within data; hand it over in one go
+	if left <= len(data) {
+		copy(data, d.remaining)
+		d.remaining = nil
+		return left, nil
+	}
+
+	// data is too small; return a partial chunk and signal the caller to reread
+	copy(data, d.remaining[:len(data)])
+	d.remaining = d.remaining[len(data):]
+	return len(data), io.ErrShortBuffer
+}
+
+func (d *k8sYAMLReader) Close() error {
+	return d.r.Close()
+}
+
+const yamlSeparator = "\n---"
+
+// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents.
+func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) {
+	if atEOF && len(data) == 0 {
+		return 0, nil, nil
+	}
+	sep := len([]byte(yamlSeparator))
+	if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 {
+		// We have a potential document terminator
+		i += sep
+		after := data[i:]
+		if len(after) == 0 {
+			// we can't read any more characters
+			if atEOF {
+				return len(data), data[:len(data)-sep], nil
+			}
+			return 0, nil, nil
+		}
+		if j := bytes.IndexByte(after, '\n'); j >= 0 {
+			return i + j + 1, data[0 : i-sep], nil
+		}
+		return 0, nil, nil
+	}
+	// If we're at EOF, we have a final, non-terminated line. Return it.
+	if atEOF {
+		return len(data), data, nil
+	}
+	// Request more data.
+	return 0, nil, nil
+}
diff --git a/pkg/frame/options.go b/pkg/frame/options.go
new file mode 100644
index 00000000..897e78fc
--- /dev/null
+++ b/pkg/frame/options.go
@@ -0,0 +1,153 @@
+package frame
+
+import (
+	"github.com/weaveworks/libgitops/pkg/content"
+	"github.com/weaveworks/libgitops/pkg/frame/sanitize"
+	"github.com/weaveworks/libgitops/pkg/util/limitedio"
+)
+
+// DefaultReadMaxFrameCount specifies the default maximum number of frames that can be read by a Reader.
+const DefaultReadMaxFrameCount = 1024 + +type singleReaderOptions struct{ SingleOptions } +type singleWriterOptions struct{ SingleOptions } +type readerOptions struct{ Options } +type writerOptions struct{ Options } +type recognizingReaderOptions struct{ RecognizingOptions } +type recognizingWriterOptions struct{ RecognizingOptions } + +func defaultSingleReaderOptions() *singleReaderOptions { + return &singleReaderOptions{ + SingleOptions: SingleOptions{ + MaxFrameSize: limitedio.DefaultMaxReadSize, + Sanitizer: sanitize.NewJSONYAML(), + }, + } +} + +func defaultSingleWriterOptions() *singleWriterOptions { + return &singleWriterOptions{ + SingleOptions: SingleOptions{ + MaxFrameSize: limitedio.Infinite, + Sanitizer: sanitize.NewJSONYAML(), + }, + } +} + +func defaultReaderOptions() *readerOptions { + return &readerOptions{ + Options: Options{ + SingleOptions: defaultSingleReaderOptions().SingleOptions, + MaxFrameCount: DefaultReadMaxFrameCount, + }, + } +} + +func defaultWriterOptions() *writerOptions { + return &writerOptions{ + Options: Options{ + SingleOptions: defaultSingleWriterOptions().SingleOptions, + MaxFrameCount: limitedio.Infinite, + }, + } +} + +func defaultRecognizingReaderOptions() *recognizingReaderOptions { + return &recognizingReaderOptions{ + RecognizingOptions: RecognizingOptions{ + Options: defaultReaderOptions().Options, + Recognizer: content.NewJSONYAMLContentTypeRecognizer(), + }, + } +} + +func defaultRecognizingWriterOptions() *recognizingWriterOptions { + return &recognizingWriterOptions{ + RecognizingOptions: RecognizingOptions{ + Options: defaultWriterOptions().Options, + Recognizer: content.NewJSONYAMLContentTypeRecognizer(), + }, + } +} + +type SingleOptions struct { + // MaxFrameSize specifies the maximum allowed frame size that can be read and returned. + // Must be a positive integer. Defaults to DefaultMaxFrameSize. 
TODO + MaxFrameSize limitedio.Limit + // Sanitizer configures the sanitizer that should be used for sanitizing the frames. + Sanitizer sanitize.Sanitizer + // TODO: Experiment + //MetadataOptions []metadata.HeaderOption +} + +func (o SingleOptions) applyToSingle(target *SingleOptions) { + if o.MaxFrameSize != 0 { + target.MaxFrameSize = o.MaxFrameSize + } + if o.Sanitizer != nil { + target.Sanitizer = o.Sanitizer + } + /*if len(o.MetadataOptions) != 0 { + target.MetadataOptions = append(target.MetadataOptions, o.MetadataOptions...) + }*/ +} + +type Options struct { + SingleOptions + + // MaxFrameCount specifies the maximum amount of successful frames that can be read or written + // using a Reader or Writer. This means that e.g. empty frames after sanitation are NOT + // counted as a frame in this context. When reading, there can be a maximum of 10*MaxFrameCount + // in total (including failed and empty). Must be a positive integer. Defaults: TODO DefaultMaxFrameCount. + MaxFrameCount limitedio.Limit +} + +func (o Options) applyTo(target *Options) { + if o.MaxFrameCount != 0 { + target.MaxFrameCount = o.MaxFrameCount + } + o.applyToSingle(&target.SingleOptions) +} + +type RecognizingOptions struct { + Options + + Recognizer content.ContentTypeRecognizer +} + +func (o RecognizingOptions) applyToRecognizing(target *RecognizingOptions) { + if o.Recognizer != nil { + target.Recognizer = o.Recognizer + } + o.applyTo(&target.Options) +} + +type SingleReaderOption interface { + ApplyToSingleReader(target *singleReaderOptions) +} + +type SingleWriterOption interface { + ApplyToSingleWriter(target *singleWriterOptions) +} + +type ReaderOption interface { + ApplyToReader(target *readerOptions) +} + +type WriterOption interface { + ApplyToWriter(target *writerOptions) +} + +type RecognizingReaderOption interface { + ApplyToRecognizingReader(target *recognizingReaderOptions) +} + +type RecognizingWriterOption interface { + ApplyToRecognizingWriter(target 
*recognizingWriterOptions) +} + +/* +TODO: Is this needed? +func WithMetadata(mopts ...metadata.HeaderOption) SingleOptions { + return SingleOptions{MetadataOptions: mopts} +}*/ diff --git a/pkg/frame/options_boilerplate.go b/pkg/frame/options_boilerplate.go new file mode 100644 index 00000000..097421f4 --- /dev/null +++ b/pkg/frame/options_boilerplate.go @@ -0,0 +1,114 @@ +package frame + +var ( + _ SingleReaderOption = SingleOptions{} + _ SingleWriterOption = SingleOptions{} + _ ReaderOption = SingleOptions{} + _ WriterOption = SingleOptions{} + _ RecognizingReaderOption = SingleOptions{} + _ RecognizingWriterOption = SingleOptions{} + + _ SingleReaderOption = Options{} + _ SingleWriterOption = Options{} + _ ReaderOption = Options{} + _ WriterOption = Options{} + _ RecognizingReaderOption = Options{} + _ RecognizingWriterOption = Options{} + + _ SingleReaderOption = RecognizingOptions{} + _ SingleWriterOption = RecognizingOptions{} + _ ReaderOption = RecognizingOptions{} + _ WriterOption = RecognizingOptions{} + _ RecognizingReaderOption = RecognizingOptions{} + _ RecognizingWriterOption = RecognizingOptions{} +) + +func (o SingleOptions) ApplyToSingleReader(target *singleReaderOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o SingleOptions) ApplyToSingleWriter(target *singleWriterOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o SingleOptions) ApplyToReader(target *readerOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o SingleOptions) ApplyToWriter(target *writerOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o SingleOptions) ApplyToRecognizingReader(target *recognizingReaderOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o SingleOptions) ApplyToRecognizingWriter(target *recognizingWriterOptions) { + o.applyToSingle(&target.SingleOptions) +} + +func (o Options) ApplyToReader(target *readerOptions) { + o.applyTo(&target.Options) +} + +func (o Options) ApplyToWriter(target 
*writerOptions) { + o.applyTo(&target.Options) +} + +func (o Options) ApplyToRecognizingReader(target *recognizingReaderOptions) { + o.applyTo(&target.Options) +} + +func (o Options) ApplyToRecognizingWriter(target *recognizingWriterOptions) { + o.applyTo(&target.Options) +} + +func (o RecognizingOptions) ApplyToRecognizingReader(target *recognizingReaderOptions) { + o.applyToRecognizing(&target.RecognizingOptions) +} + +func (o RecognizingOptions) ApplyToRecognizingWriter(target *recognizingWriterOptions) { + o.applyToRecognizing(&target.RecognizingOptions) +} + +func (o *singleReaderOptions) applyOptions(opts []SingleReaderOption) *singleReaderOptions { + for _, opt := range opts { + opt.ApplyToSingleReader(o) + } + return o +} + +func (o *singleWriterOptions) applyOptions(opts []SingleWriterOption) *singleWriterOptions { + for _, opt := range opts { + opt.ApplyToSingleWriter(o) + } + return o +} + +func (o *readerOptions) applyOptions(opts []ReaderOption) *readerOptions { + for _, opt := range opts { + opt.ApplyToReader(o) + } + return o +} + +func (o *writerOptions) applyOptions(opts []WriterOption) *writerOptions { + for _, opt := range opts { + opt.ApplyToWriter(o) + } + return o +} + +func (o *recognizingReaderOptions) applyOptions(opts []RecognizingReaderOption) *recognizingReaderOptions { + for _, opt := range opts { + opt.ApplyToRecognizingReader(o) + } + return o +} + +func (o *recognizingWriterOptions) applyOptions(opts []RecognizingWriterOption) *recognizingWriterOptions { + for _, opt := range opts { + opt.ApplyToRecognizingWriter(o) + } + return o +} diff --git a/pkg/frame/options_test.go b/pkg/frame/options_test.go new file mode 100644 index 00000000..73c05271 --- /dev/null +++ b/pkg/frame/options_test.go @@ -0,0 +1,153 @@ +package frame + +/* +func compareOptions(t *testing.T, name string, got, want interface{}) { + // We want to include the unexported tracer field when comparing TracerOptions, hence use reflect.DeepEqual + // for the comparison + 
opt := cmp.Comparer(func(x, y tracing.TracerOptions) bool { + return reflect.DeepEqual(x, y) + }) + // Report error with diff if not equal + if !cmp.Equal(got, want, opt) { + t.Errorf("%s: got vs want: %s", name, cmp.Diff(got, want, opt)) + } +} + +func TestApplyReaderOptions(t *testing.T) { + defaultWithMutation := func(apply func(*ReaderOptions)) *ReaderOptions { + o := defaultReaderOpts() + apply(o) + return o + } + tests := []struct { + name string + opts []ReaderOption + fromDefault bool + want *ReaderOptions + }{ + { + name: "simple defaults", + fromDefault: true, + want: defaultReaderOpts(), + }, + { + name: "MaxFrameSize: apply", + opts: []ReaderOption{&ReaderWriterOptions{MaxFrameSize: 1234}}, + want: &ReaderOptions{ReaderWriterOptions: ReaderWriterOptions{MaxFrameSize: 1234}}, + }, + { + name: "MaxFrameSize: override default", + opts: []ReaderOption{&ReaderWriterOptions{MaxFrameSize: 1234}}, + fromDefault: true, + want: defaultWithMutation(func(ro *ReaderOptions) { + ro.MaxFrameSize = 1234 + }), + }, + { + name: "MaxFrameSize: zero value has no effect", + opts: []ReaderOption{&ReaderWriterOptions{MaxFrameSize: 0}}, + fromDefault: true, + want: defaultReaderOpts(), + }, + { + name: "MaxFrameSize: latter overrides earlier, if set", + opts: []ReaderOption{ + &ReaderWriterOptions{MaxFrameSize: 1234}, + &ReaderWriterOptions{MaxFrameSize: 4321}, + &ReaderWriterOptions{MaxFrameSize: 0}, + }, + want: &ReaderOptions{ReaderWriterOptions: ReaderWriterOptions{MaxFrameSize: 4321}}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var from *ReaderOptions + if tt.fromDefault { + from = defaultReaderOpts() + } else { + from = &ReaderOptions{} + } + + got := from.ApplyOptions(tt.opts) + compareOptions(t, "TestApplyReaderOptions", got, tt.want) + }) + } +} + + +func TestApplyReaderWriterOptions(t *testing.T) { + defReadWithMutation := func(apply func(*ReaderOptions)) *ReaderOptions { + o := defaultReaderOpts() + apply(o) + return o + } + 
defWriteWithMutation := func(apply func(*WriterOptions)) *WriterOptions { + o := defaultWriterOpts() + apply(o) + return o + } + barTracer := otel.GetTracerProvider().Tracer("bar") + tests := []struct { + name string + opts []ReaderWriterOption + fromDefault bool + wantReader *ReaderOptions + wantWriter *WriterOptions + }{ + { + name: "simple defaults", + fromDefault: true, + wantReader: defaultReaderOpts(), + wantWriter: defaultWriterOpts(), + }, + { + name: "WithTracerOptions: Set Tracer.Name", + fromDefault: true, + opts: []ReaderWriterOption{WithTracerOptions(tracing.TracerOptions{Name: "foo"})}, + wantReader: defReadWithMutation(func(ro *ReaderOptions) { + ro.Tracer.Name = "foo" + }), + wantWriter: defWriteWithMutation(func(wo *WriterOptions) { + wo.Tracer.Name = "foo" + }), + }, + { + name: "WithTracerOptions: Set Tracer", + fromDefault: true, + opts: []ReaderWriterOption{WithTracerOptions(tracing.WithTracer(barTracer))}, + wantReader: defReadWithMutation(func(ro *ReaderOptions) { + // The tracer field is private, hence we need to configure it like this + tracing.WithTracer(barTracer).ApplyToTracer(&ro.Tracer) + }), + wantWriter: defWriteWithMutation(func(wo *WriterOptions) { + // The tracer field is private, hence we need to configure it like this + tracing.WithTracer(barTracer).ApplyToTracer(&wo.Tracer) + }), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var fromReader *ReaderOptions + var fromWriter *WriterOptions + if tt.fromDefault { + fromReader = defaultReaderOpts() + fromWriter = defaultWriterOpts() + } else { + fromReader = &ReaderOptions{} + fromWriter = &WriterOptions{} + } + + readOpts := []ReaderOption{} + writeOpts := []WriterOption{} + for _, opt := range tt.opts { + readOpts = append(readOpts, opt) + writeOpts = append(writeOpts, opt) + } + + gotReader := fromReader.ApplyOptions(readOpts) + gotWriter := fromWriter.ApplyOptions(writeOpts) + compareOptions(t, "TestApplyReaderWriterOptions", gotReader, 
tt.wantReader) + compareOptions(t, "TestApplyReaderWriterOptions", gotWriter, tt.wantWriter) + }) + } +}*/ diff --git a/pkg/frame/reader.go b/pkg/frame/reader.go new file mode 100644 index 00000000..b2800d32 --- /dev/null +++ b/pkg/frame/reader.go @@ -0,0 +1,113 @@ +package frame + +import ( + "context" + "sync" + + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame/sanitize" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "go.opentelemetry.io/otel/trace" +) + +// newHighlevelReader takes a "low-level" Reader (like *streamingReader or *yamlReader), +// and implements higher-level logic like proper closing, mutex locking and tracing. +func newHighlevelReader(r Reader, o *readerOptions) Reader { + return &highlevelReader{ + read: r, + readMu: &sync.Mutex{}, + opts: o, + maxTotalFrames: limitedio.Limit(o.MaxFrameCount * 10), + } +} + +// highlevelReader uses the closableResource for the mutex locking, properly handling +// the close logic, and initiating the trace spans. On top of that it records extra +// tracing context in ReadFrame. +type highlevelReader struct { + read Reader + // readMu guards read.ReadFrame + readMu *sync.Mutex + + opts *readerOptions + // maxTotalFrames is set to opts.MaxFrameCount * 10 + maxTotalFrames limitedio.Limit + // successfulFrameCount counts the amount of successful frames read + successfulFrameCount int64 + // totalFrameCount counts the total amount of frames read (including empty and failed ones) + totalFrameCount int64 +} + +func (r *highlevelReader) ReadFrame(ctx context.Context) ([]byte, error) { + // Make sure we have access to the underlying resource + r.readMu.Lock() + defer r.readMu.Unlock() + + var frame []byte + err := tracing.FromContext(ctx, r). 
+ TraceFunc(ctx, "ReadFrame", func(ctx context.Context, span trace.Span) error { + + // Refuse to read more than the maximum amount of successful frames + if r.opts.MaxFrameCount.IsLessThan(r.successfulFrameCount) { + return ErrFrameCountOverflow(r.opts.MaxFrameCount) + } + + // Call the underlying reader + var err error + frame, err = r.readFrame(ctx) + if err != nil { + return err + } + + // Record how large the frame is, and its content for debugging + span.SetAttributes(content.SpanAttrByteContent(frame)...) + return nil + }).RegisterCustom(content.SpanRegisterReadError) + // SpanRegisterReadError registers io.EOF as an "event", and other errors as "unknown errors" in the trace + if err != nil { + return nil, err + } + return frame, nil +} + +func (r *highlevelReader) readFrame(ctx context.Context) ([]byte, error) { + // Ensure the total number of frames doesn't overflow + // TODO: Should this be LT or LTE? + if r.maxTotalFrames.IsLessThanOrEqual(r.totalFrameCount) { + return nil, ErrFrameCountOverflow(r.maxTotalFrames) + } + // Read the frame, and increase the total frame counter is increased + // This does not at the moment forward the same ReadFrameResult instance, + // but that can maybe be done in the future if needed. It would be needed + // if the underlying Reader would return an interface that extends more + // methods than the default ones. + frame, err := r.read.ReadFrame(ctx) + r.totalFrameCount += 1 + if err != nil { + return nil, err + } + + // Sanitize the frame. 
+ frame, err = sanitize.IfSupported(ctx, r.opts.Sanitizer, r.ContentType(), frame) + if err != nil { + return nil, err + } + + // If it's empty, read the next frame automatically + if len(frame) == 0 { + return r.readFrame(ctx) + } + + // Otherwise, if it's non-empty, return it and increase the "successful" counter + r.successfulFrameCount += 1 + // If the frame count now overflows, return a ErrFrameCountOverflow + if r.opts.MaxFrameCount.IsLessThan(r.successfulFrameCount) { + return nil, ErrFrameCountOverflow(r.opts.MaxFrameCount) + } + return frame, nil +} + +func (r *highlevelReader) ContentType() content.ContentType { return r.read.ContentType() } +func (r *highlevelReader) Close(ctx context.Context) error { return closeWithTrace(ctx, r.read, r) } +func (r *highlevelReader) ContentMetadata() content.Metadata { return r.read.ContentMetadata() } diff --git a/pkg/frame/reader_factory.go b/pkg/frame/reader_factory.go new file mode 100644 index 00000000..51e0fb12 --- /dev/null +++ b/pkg/frame/reader_factory.go @@ -0,0 +1,74 @@ +package frame + +import ( + "context" + + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/util/limitedio" +) + +func DefaultFactory() Factory { return defaultFactory{} } + +var internalFactoryVar = DefaultFactory() + +type defaultFactory struct{} + +func (defaultFactory) NewReader(ct content.ContentType, r content.Reader, opts ...ReaderOption) Reader { + o := defaultReaderOptions().applyOptions(opts) + + var lowlevel Reader + switch ct { + case content.ContentTypeYAML: + lowlevel = newYAMLReader(r, o) + case content.ContentTypeJSON: + lowlevel = newJSONReader(r, o) + default: + return newErrReader(content.ErrUnsupportedContentType(ct), "", r.ContentMetadata()) + } + return newHighlevelReader(lowlevel, o) +} + +func (defaultFactory) NewSingleReader(ct content.ContentType, r content.Reader, opts ...SingleReaderOption) Reader { + o := defaultSingleReaderOptions().applyOptions(opts) + + return 
newHighlevelReader(newSingleReader(r, ct, o), &readerOptions{ + // Note: The MaxFrameCount == Infinite here makes the singleReader responsible for + // counting how many times + Options: Options{SingleOptions: o.SingleOptions, MaxFrameCount: limitedio.Infinite}, + }) +} + +func (f defaultFactory) NewRecognizingReader(ctx context.Context, r content.Reader, opts ...RecognizingReaderOption) Reader { + o := defaultRecognizingReaderOptions().applyOptions(opts) + + // Recognize the content type using the given recognizer + r, ct, err := content.NewRecognizingReader(ctx, r, o.Recognizer) + if err != nil { + return newErrReader(err, "", r.ContentMetadata()) + } + // Re-use the logic of the "main" Reader constructor; validate ct there + return f.NewReader(ct, r, o) +} + +func (defaultFactory) SupportedContentTypes() content.ContentTypes { + return []content.ContentType{content.ContentTypeYAML, content.ContentTypeJSON} +} + +func newErrReader(err error, ct content.ContentType, meta content.Metadata) Reader { + return &errReader{ + ct, + meta.ToContainer(), + &nopCloser{}, + err, + } +} + +// errReader always returns an error +type errReader struct { + content.ContentTyped + content.MetadataContainer + Closer + err error +} + +func (r *errReader) ReadFrame(context.Context) ([]byte, error) { return nil, r.err } diff --git a/pkg/frame/reader_factory_test.go b/pkg/frame/reader_factory_test.go new file mode 100644 index 00000000..f0604253 --- /dev/null +++ b/pkg/frame/reader_factory_test.go @@ -0,0 +1,60 @@ +package frame + +/*var ( + customErr = errors.New("custom") + customErrIoReadCloser = errIoReadCloser(customErr) +)*/ + +/*TODO +func TestNewReader_Unrecognized(t *testing.T) { + fr := NewReader(FramingType("doesnotexist"), customErrIoReadCloser) + ctx := context.Background() + frame, err := fr.ReadFrame(ctx) + assert.ErrorIs(t, err, ErrUnsupportedFramingType) + assert.Len(t, frame, 0) +}*/ + +/*func Test_toReadCloser(t *testing.T) { + tmp := t.TempDir() + f, err := 
os.Create(filepath.Join(tmp, "toReadCloser.txt")) + require.Nil(t, err) + defer f.Close() + + tests := []struct { + name string + r io.Reader + wantHasCloser bool + }{ + { + name: "*bytes.Reader", + r: bytes.NewReader([]byte("foo")), + wantHasCloser: false, + }, + { + name: "*os.File", + r: f, + wantHasCloser: true, + }, + { + name: "os.Stdout", + r: os.Stdout, + wantHasCloser: false, + }, + { + name: "", + r: errIoReadCloser(nil), + wantHasCloser: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotRc, gotHasCloser := toReadCloser(tt.r) + wantRc, _ := tt.r.(io.ReadCloser) + if !tt.wantHasCloser { + wantRc = io.NopCloser(tt.r) + } + assert.Equal(t, wantRc, gotRc) + assert.Equal(t, tt.wantHasCloser, gotHasCloser) + }) + } +}*/ diff --git a/pkg/frame/reader_streaming.go b/pkg/frame/reader_streaming.go new file mode 100644 index 00000000..86efddbd --- /dev/null +++ b/pkg/frame/reader_streaming.go @@ -0,0 +1,115 @@ +package frame + +import ( + "context" + "errors" + "io" + + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" +) + +func newYAMLReader(r content.Reader, o *readerOptions) Reader { + // json.YAMLFramer.NewFrameReader takes care of the actual YAML framing logic + maxFrameSizeInt, err := o.MaxFrameSize.Int() + if err != nil { + return newErrReader(err, "", r.ContentMetadata()) + } + r = r.Wrap(func(underlying io.ReadCloser) io.Reader { + return newK8sYAMLReader(underlying, maxFrameSizeInt) + }) + + // Mark the content type as YAML + r.ContentMetadata().Apply(content.WithContentType(content.ContentTypeYAML)) + + return newStreamingReader(content.ContentTypeYAML, r, o.MaxFrameSize) +} + +// newJSONReader creates a "low-level" JSON Reader from the given io.ReadCloser. 
+func newJSONReader(r content.Reader, o *readerOptions) Reader { + // json.Framer.NewFrameReader takes care of the actual JSON framing logic + r = r.Wrap(func(underlying io.ReadCloser) io.Reader { + return json.Framer.NewFrameReader(underlying) + }) + + // Mark the content type as JSON + r.ContentMetadata().Apply(content.WithContentType(content.ContentTypeJSON)) + + return newStreamingReader(content.ContentTypeJSON, r, o.MaxFrameSize) +} + +// newStreamingReader makes a generic Reader that reads from an io.ReadCloser returned +// from Kubernetes' runtime.Framer.NewFrameReader, in exactly the way +// k8s.io/apimachinery/pkg/runtime/serializer/streaming implements this. +// On a high-level, it means that many small Read(p []byte) calls are made as long as +// io.ErrShortBuffer is returned. When err == nil is returned from rc, we know that we're +// at the end of a frame, and at that point the frame is returned. +// +// Note: This Reader is a so-called "low-level" one. It doesn't do tracing, mutex locking, or +// proper closing logic. It must be wrapped by a composite, high-level Reader like highlevelReader. +func newStreamingReader(ct content.ContentType, r content.Reader, maxFrameSize limitedio.Limit) Reader { + // Limit the amount of bytes read from the content.Reader + r, resetCounter := content.WrapLimited(r, maxFrameSize) + // Wrap + cr := r.WrapSegment(func(rc io.ReadCloser) content.RawSegmentReader { + return newK8sStreamingReader(rc, maxFrameSize.Int64()) + }) + + return &streamingReader{ + // Clone the metadata and expose it + // TODO: Maybe ReaderOptions should allow changing it? + MetadataContainer: r.ContentMetadata().Clone().ToContainer(), + ContentTyped: ct, + resetCounter: resetCounter, + cr: cr, + maxFrameSize: maxFrameSize, + } +} + +// streamingReader is a small "conversion" struct that implements the Reader interface for a +// given k8sStreamingReader. 
When reader_streaming_k8s.go is upstreamed, we can replace the +// temporary k8sStreamingReader interface with a "proper" Kubernetes one. +type streamingReader struct { + content.MetadataContainer + content.ContentTyped + resetCounter content.ResetCounterFunc + cr content.SegmentReader + maxFrameSize limitedio.Limit +} + +func (r *streamingReader) ReadFrame(ctx context.Context) ([]byte, error) { + // Read one frame from the streamReader + frame, err := r.cr.WithContext(ctx).Read() + if err != nil { + // Transform streaming.ErrObjectTooLarge to a ErrFrameSizeOverflow, if returned. + return nil, mapError(err, errorMappings{ + streaming.ErrObjectTooLarge: func() error { + return limitedio.ErrReadSizeOverflow(r.maxFrameSize) + }, + }) + } + // Reset the counter only when we have a successful frame + r.resetCounter() + return frame, nil +} + +func (r *streamingReader) Close(ctx context.Context) error { return r.cr.WithContext(ctx).Close() } + +// mapError is an utility for mapping a "actual" error to a lazily-evaluated "desired" one. 
+// Equality between the errorMappings' keys and err is defined by errors.Is +func mapError(err error, f errorMappings) error { + for target, mkErr := range f { + if errors.Is(err, target) { + return mkErr() + } + } + return err +} + +// errorMappings maps actual errors to lazily-evaluated desired ones +type errorMappings map[error]mkErrorFunc + +// mkErrorFunc lazily creates an error +type mkErrorFunc func() error diff --git a/pkg/frame/reader_test.go b/pkg/frame/reader_test.go new file mode 100644 index 00000000..32d4ab44 --- /dev/null +++ b/pkg/frame/reader_test.go @@ -0,0 +1,526 @@ +package frame + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/compositeio" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "go.opentelemetry.io/otel/trace" + "go.uber.org/zap/zapcore" + "k8s.io/utils/pointer" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func init() { + // Set up the global logger + log.SetLogger(zap.New(zap.ConsoleEncoder(func(ec *zapcore.EncoderConfig) { + ec.TimeKey = "" + }))) // zap.JSONEncoder() + + err := tracing.NewBuilder(). + //RegisterStdoutExporter(stdouttrace.WithWriter(io.Discard)). + RegisterInsecureJaegerExporter(""). + //WithLogging(true). + InstallGlobally() + if err != nil { + fmt.Printf("failed to install tracing provider: %v\n", err) + os.Exit(1) + } +} + +// TODO: Make sure that len(frame) == 0 when err != nil for the Writer. + +// TODO: Test the output traces more througoutly, when there is SpanProcessor that supports writing +// relevant data to a file, and do matching between spans. + +// TODO: Make some 16M (or more) JSON/YAML files and show that these are readable (or not). 
That's not +// testing a case that already isn't tested by the unit tests below, but would be a good marker that +// it actually solves the right problem. + +// TODO: Maybe add some race-condition tests? The centralized place mutexes are used are in +// highlevel{Reader,Writer}, so that'd be the place in that case. + +type testcase struct { + singleReadOpts []SingleReaderOption + singleWriteOpts []SingleWriterOption + // single{Read,Write}Opts are automatically casted to {Reader,Writer}Options if possible + // and included in readOpts and writeOpts; no need to specify twice + readOpts []ReaderOption + writeOpts []WriterOption + // {read,write}Opts are automatically casted to Recognizing{Reader,Writer}Options if possible + // and included in recognizing{Read,Write}Opts; no need to specify twice + recognizingReadOpts []RecognizingReaderOption + recognizingWriteOpts []RecognizingWriterOption + + name string + testdata []testdata + // Reader.ReadFrame will be called len(readResults) times. If a err == nil return is expected, just put + // nil in the error slice. Similarly for Writer.WriteFrame and writeResults. + // Note that len(readResults) >= len(frames) and len(writeResults) >= len(frames) must hold. + // By issuing more reads or writes than there are frames, one can check the error behavior + readResults []error + writeResults []error + // if closeWriterIdx or closeReaderIdx are non-nil, the Reader/Writer will be closed after the read at + // that specified index. closeWriterErr and closeReaderErr can be used to check the error returned by + // the close call. + closeWriterIdx *int64 + closeWriterErr error + //expectWriterClosed bool + closeReaderIdx *int64 + closeReaderErr error + + //expectReaderCloser bool +} + +type testdata struct { + ct content.ContentType + single, recognizing bool + // frames contain the individual frames of rawData, which in turn is the content of the underlying + // source/stream. 
if len(writeResults) == 0, there will be no checking that writing all frames + // in order will produce the correct rawData. if len(readResults) == 0, there will be no checking + // that reading rawData will produce the frames string + rawData string + frames []string +} + +const ( + yamlSep = "---\n" + noNewlineYAML = `foobar: true` + testYAML = noNewlineYAML + "\n" + testYAMLlen = int64(len(testYAML)) + messyYAMLP1 = ` +--- + +--- +` + noNewlineYAML + ` +` + messyYAMLP2 = ` + +--- +--- +` + noNewlineYAML + ` +---` + messyYAML = messyYAMLP1 + messyYAMLP2 + + testJSON = `{"foo":true} +` + testJSONlen = int64(len(testJSON)) + testJSON2 = `{"bar":"hello"} +` + messyJSONP1 = ` + +` + testJSON + ` +` + messyJSONP2 = ` + +` + testJSON + ` +` + messyJSON = messyJSONP1 + messyJSONP2 + + otherCT = content.ContentType("other") + otherFrame = "('other'; 9)\n('bar'; true)" + otherFrameLen = int64(len(otherFrame)) +) + +func TestReader(t *testing.T) { + // Some tests depend on this + require.Equal(t, testYAMLlen, testJSONlen) + NewFactoryTester(t, defaultFactory{}).Test() + assert.Nil(t, tracing.ForceFlushGlobal(context.Background(), 0)) +} + +// TODO: Test that closing of Readers and Writers works + +var defaultTestCases = []testcase{ + // Roundtrip cases + { + name: "simple roundtrip", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML}, rawData: yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON}, rawData: testJSON}, + }, + writeResults: []error{nil, nil, nil, nil}, + readResults: []error{nil, io.EOF, io.EOF, io.EOF}, + }, + + { + name: "two-frame roundtrip with closed writer", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2}, rawData: testJSON + testJSON2}, + }, + writeResults: []error{nil, nil, nil, nil}, + readResults: []error{nil, nil, io.EOF, 
io.EOF}, + }, + // YAML newline addition + { + name: "YAML Read: a newline will be added", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: noNewlineYAML, frames: []string{testYAML}}, + }, + readResults: []error{nil, io.EOF}, + }, + { + name: "YAML Write: a newline will be added", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{noNewlineYAML}, rawData: yamlSep + testYAML}, + }, + writeResults: []error{nil}, + }, + // Empty frames + { + name: "Read: io.EOF when there are no non-empty frames", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: "---"}, + {ct: content.ContentTypeYAML, rawData: "---\n"}, + {ct: content.ContentTypeJSON, rawData: ""}, + {ct: content.ContentTypeJSON, rawData: " \n "}, + }, + readResults: []error{io.EOF}, + }, + { + name: "Write: Empty sanitized frames aren't written", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{"---", "---\n", " \n--- \n---"}}, + {ct: content.ContentTypeJSON, frames: []string{"", " \n ", " "}}, + }, + writeResults: []error{nil, nil, nil}, + }, + { + name: "Write: can write empty frames forever without errors", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2}, rawData: testJSON + testJSON2}, + }, + writeResults: []error{nil, nil, nil, nil, nil}, + readResults: []error{nil, nil, io.EOF}, + }, + // Sanitation + { + name: "YAML Read: a leading \\n--- will be ignored", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: "\n" + yamlSep + noNewlineYAML, frames: []string{testYAML}}, + }, + readResults: []error{nil, io.EOF}, + }, + { + name: "YAML Read: a leading --- will be ignored", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: yamlSep + noNewlineYAML, frames: []string{testYAML}}, + }, + readResults: []error{nil, io.EOF}, + }, + { + name: "Read: 
sanitize messy content", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: messyYAML, frames: []string{testYAML, testYAML}}, + {ct: content.ContentTypeJSON, rawData: messyJSON, frames: []string{testJSON, testJSON}}, + }, + readResults: []error{nil, nil, io.EOF}, + }, + { + name: "Write: sanitize messy content", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{messyYAMLP1, messyYAMLP2}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{messyJSONP1, messyJSONP2}, rawData: testJSON + testJSON}, + }, + writeResults: []error{nil, nil}, + }, + // MaxFrameSize + { + name: "Read: the frame size is exactly within bounds, also enforce counter reset", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: yamlSep + testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}}, + {ct: content.ContentTypeJSON, rawData: testJSON + testJSON, frames: []string{testJSON, testJSON}}, + }, + singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, + readResults: []error{nil, nil, io.EOF}, + }, + { + name: "YAML Read: there is a newline before the initial ---, should sanitize", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: "\n" + yamlSep + testYAML + yamlSep + testYAML, frames: []string{testYAML, testYAML}}, + }, + singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, + readResults: []error{nil, nil, io.EOF}, + }, + { + name: "Read: the frame is out of bounds, on the same line", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: testYAML}, + {ct: content.ContentTypeJSON, rawData: testJSON}, + }, + singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen - 2)}}, + readResults: []error{&limitedio.ReadSizeOverflowError{}}, + }, + { + name: "YAML Read: the frame is out of bounds, but continues on the next line", + testdata: 
[]testdata{ + {ct: content.ContentTypeYAML, rawData: testYAML + testYAML}, + }, + singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, + readResults: []error{&limitedio.ReadSizeOverflowError{}}, + }, + { + name: "Read: first frame ok, then always frame overflow", + testdata: []testdata{ + {ct: content.ContentTypeYAML, rawData: testYAML + yamlSep + testYAML + testYAML, frames: []string{testYAML}}, + {ct: content.ContentTypeJSON, rawData: testJSON + testJSON2, frames: []string{testJSON}}, + }, + singleReadOpts: []SingleReaderOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, + readResults: []error{nil, &limitedio.ReadSizeOverflowError{}, &limitedio.ReadSizeOverflowError{}, &limitedio.ReadSizeOverflowError{}}, + }, + { + name: "Write: the second frame is too large, ignore that, but allow writing smaller frames later", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML + testYAML, testYAML}, rawData: yamlSep + testYAML + yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON2, testJSON}, rawData: testJSON + testJSON}, + }, + singleWriteOpts: []SingleWriterOption{&SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}}, + writeResults: []error{nil, &limitedio.ReadSizeOverflowError{}, nil}, + }, + // TODO: test negative limits too + { + name: "first frame ok, then Read => EOF and Write => nil consistently", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML}, rawData: yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON}, rawData: testJSON}, + }, + readResults: []error{nil, io.EOF, io.EOF, io.EOF, io.EOF}, + writeResults: []error{nil, nil, nil, nil, nil}, + }, + // MaxFrameCount + { + name: "Write: Don't allow writing more than a maximum amount of frames", + testdata: []testdata{ + {ct: content.ContentTypeYAML, frames: []string{testYAML, testYAML, testYAML}, rawData: yamlSep + 
testYAML + yamlSep + testYAML}, + {ct: content.ContentTypeJSON, frames: []string{testJSON, testJSON, testJSON}, rawData: testJSON + testJSON}, + }, + writeResults: []error{nil, nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}}, + writeOpts: []WriterOption{&Options{MaxFrameCount: 2}}, + }, + { + name: "Read: Don't allow reading more than a maximum amount of successful frames", + testdata: []testdata{ + {ct: content.ContentTypeYAML, + rawData: testYAML + yamlSep + testYAML + yamlSep + testYAML, + frames: []string{testYAML, testYAML}}, + {ct: content.ContentTypeJSON, + rawData: testJSON + testJSON + testJSON, + frames: []string{testJSON, testJSON}}, + }, + readResults: []error{nil, nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}}, + readOpts: []ReaderOption{&Options{MaxFrameCount: 2}}, + }, + { + name: "Read: Don't allow reading more than a maximum amount of successful frames, and 10x in total", + testdata: []testdata{ + {ct: content.ContentTypeYAML, + rawData: strings.Repeat("\n"+yamlSep, 10) + testYAML}, + }, + readResults: []error{&FrameCountOverflowError{}, &FrameCountOverflowError{}}, + readOpts: []ReaderOption{&Options{MaxFrameCount: 1}}, + }, + { + name: "Read: Allow reading up to the maximum amount of 10x the successful frames count", + testdata: []testdata{ + {ct: content.ContentTypeYAML, + rawData: strings.Repeat("\n"+yamlSep, 9) + testYAML + yamlSep + yamlSep, frames: []string{testYAML}}, + }, + readResults: []error{nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}}, + readOpts: []ReaderOption{&Options{MaxFrameCount: 1}}, + }, + { + name: "Read: Allow reading exactly that amount of successful frames, if then io.EOF", + testdata: []testdata{ + {ct: content.ContentTypeYAML, + rawData: testYAML + yamlSep + testYAML, + frames: []string{testYAML, testYAML}}, + {ct: content.ContentTypeJSON, + rawData: testJSON + testJSON, + frames: []string{testJSON, testJSON}}, + }, + readResults: []error{nil, nil, io.EOF, io.EOF}, + readOpts: 
[]ReaderOption{&Options{MaxFrameCount: 2}}, + }, + // Other Framing Types and Single + { + name: "Roundtrip: Allow reading other framing types for single reader, check overflows too", + testdata: []testdata{ + {ct: otherCT, single: true, rawData: otherFrame, frames: []string{otherFrame}}, + }, + writeResults: []error{nil, &FrameCountOverflowError{}, &FrameCountOverflowError{}, &FrameCountOverflowError{}}, + readResults: []error{nil, io.EOF, io.EOF, io.EOF}, + }, + { + name: "Read: other framing type frame size is exactly within bounds", + testdata: []testdata{ + {ct: otherCT, single: true, rawData: otherFrame, frames: []string{otherFrame}}, + }, + singleReadOpts: []SingleReaderOption{SingleOptions{MaxFrameSize: limitedio.Limit(otherFrameLen)}}, + readResults: []error{nil, io.EOF}, + }, + { + name: "Read: other framing type frame size overflow", + testdata: []testdata{ + {ct: otherCT, single: true, rawData: otherFrame}, + }, + singleReadOpts: []SingleReaderOption{SingleOptions{MaxFrameSize: limitedio.Limit(otherFrameLen - 1)}}, + readResults: []error{&limitedio.ReadSizeOverflowError{}, io.EOF, io.EOF}, + }, + { + name: "Write: other framing type frame size overflow", + testdata: []testdata{ + {ct: otherCT, single: true, frames: []string{otherFrame, otherFrame}}, + }, + singleWriteOpts: []SingleWriterOption{SingleOptions{MaxFrameSize: limitedio.Limit(otherFrameLen - 1)}}, + writeResults: []error{&limitedio.ReadSizeOverflowError{}, &limitedio.ReadSizeOverflowError{}, nil}, + }, +} + +func NewFactoryTester(t *testing.T, f Factory) *FactoryTester { + return &FactoryTester{ + t: t, + factory: f, + cases: defaultTestCases, + } +} + +type FactoryTester struct { + t *testing.T + factory Factory + + cases []testcase +} + +func (h *FactoryTester) Test() { + for _, c := range h.cases { + h.t.Run(c.name, func(t *testing.T) { + h.testRoundtripCase(t, &c) + }) + } +} + +func (h *FactoryTester) testRoundtripCase(t *testing.T, c *testcase) { + sropt := 
(&singleReaderOptions{}).applyOptions(c.singleReadOpts) + swopt := (&singleWriterOptions{}).applyOptions(c.singleWriteOpts) + ropt := (&readerOptions{}).applyOptions(c.readOpts) + wopt := (&writerOptions{}).applyOptions(c.writeOpts) + + c.readOpts = append(c.readOpts, sropt) + c.recognizingReadOpts = append(c.recognizingReadOpts, sropt) + c.recognizingReadOpts = append(c.recognizingReadOpts, ropt) + + c.writeOpts = append(c.writeOpts, swopt) + c.recognizingWriteOpts = append(c.recognizingWriteOpts, swopt) + c.recognizingWriteOpts = append(c.recognizingWriteOpts, wopt) + + ctx := context.Background() + for i, data := range c.testdata { + subName := fmt.Sprintf("%d %s", i, data.ct) + t.Run(subName, func(t *testing.T) { + tr := tracing.TracerOptions{Name: fmt.Sprintf("%s %s", c.name, subName), UseGlobal: pointer.Bool(true)} + _ = tr.TraceFunc(ctx, "", func(ctx context.Context, _ trace.Span) error { + h.testRoundtripCaseContentType(t, ctx, c, &data) + return nil + }).Register() + }) + } +} + +func (h *FactoryTester) testRoundtripCaseContentType(t *testing.T, ctx context.Context, c *testcase, d *testdata) { + var buf bytes.Buffer + + readCloseCounter := &recordingCloser{} + writeCloseCounter := &recordingCloser{} + cw := content.NewWriter(compositeio.WriteCloser(&buf, writeCloseCounter)) + cr := content.NewReader(compositeio.ReadCloser(&buf, readCloseCounter)) + var w Writer + if d.single && d.recognizing { + panic("cannot be both single and recognizing") + } else if d.single && !d.recognizing { + w = h.factory.NewSingleWriter(d.ct, cw, c.singleWriteOpts...) + } else if !d.single && d.recognizing { + w = h.factory.NewRecognizingWriter(cw, c.recognizingWriteOpts...) + } else { + w = h.factory.NewWriter(d.ct, cw, c.writeOpts...) 
+ } + assert.Equalf(t, w.ContentType(), d.ct, "Writer.content.ContentType") + + var r Reader + if d.single && d.recognizing { + panic("cannot be both single and recognizing") + } else if d.single && !d.recognizing { + r = h.factory.NewSingleReader(d.ct, cr, c.singleReadOpts...) + } else if !d.single && d.recognizing { + r = h.factory.NewRecognizingReader(ctx, cr, c.recognizingReadOpts...) + } else { + r = h.factory.NewReader(d.ct, cr, c.readOpts...) + } + assert.Equalf(t, r.ContentType(), d.ct, "Reader.content.ContentType") + + // Write frames using the writer + for i, expected := range c.writeResults { + var frame []byte + // Only write a frame using the writer if one is supplied + if i < len(d.frames) { + frame = []byte(d.frames[i]) + } + + // Write the frame using the writer and check the error + got := w.WriteFrame(ctx, frame) + assert.ErrorIsf(t, got, expected, "Writer.WriteFrame err %d", i) + + // If we should close the writer here, do it and check the expected error + if c.closeWriterIdx != nil && *c.closeWriterIdx == int64(i) { + assert.ErrorIsf(t, w.Close(ctx), c.closeWriterErr, "Writer.Close err %d", i) + } + } + + assert.Equalf(t, 0, writeCloseCounter.count, "Writer should not be closed") + + // Check that the written output was as expected, if writing is enabled + if len(c.writeResults) != 0 { + assert.Equalf(t, d.rawData, buf.String(), "Writer Output") + } else { + // If writing was not tested, make sure the buffer contains the raw data for reading + buf = *bytes.NewBufferString(d.rawData) + } + + // Read frames using the reader + for i, expected := range c.readResults { + // Check the expected error + frame, err := r.ReadFrame(ctx) + assert.ErrorIsf(t, err, expected, "Reader.ReadFrame err %d", i) + // Only check the frame content if there's an expected frame + if i < len(d.frames) { + assert.Equalf(t, d.frames[i], string(frame), "Reader.ReadFrame frame %d", i) + } + + // If we should close the reader here, do it and check the expected error + if 
c.closeReaderIdx != nil && *c.closeReaderIdx == int64(i) { + assert.ErrorIsf(t, r.Close(ctx), c.closeReaderErr, "Reader.Close err %d", i) + } + } + assert.Equalf(t, 0, readCloseCounter.count, "Reader should not be closed") +} + +type recordingCloser struct { + count int +} + +func (c *recordingCloser) Close() error { + c.count += 1 + return nil +} diff --git a/pkg/frame/sanitize/comments/LICENSE b/pkg/frame/sanitize/comments/LICENSE new file mode 100644 index 00000000..9c8f3ea0 --- /dev/null +++ b/pkg/frame/sanitize/comments/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/pkg/frame/sanitize/comments/comments.go b/pkg/frame/sanitize/comments/comments.go new file mode 100644 index 00000000..140542c6 --- /dev/null +++ b/pkg/frame/sanitize/comments/comments.go @@ -0,0 +1,117 @@ +// Copyright 2019 The Kubernetes Authors. +// SPDX-License-Identifier: Apache-2.0 + +// This package provides a means to copy over comments between +// two kyaml/yaml.RNode trees. This code is derived from +// the sigs.k8s.io/kustomize/kyaml/comments package, at revision +// 600d4f2c0bf174abd76d03e49939ee0c34b2a019. +// +// It has been slightly modified and adapted to not lose any +// comment from the old tree, although the node the comment is +// attached to doesn't exist in the new tree. To solve this, +// this package moves any such comments to the beginning of the +// file. +// This file is a temporary means as long as we're waiting for +// these code changes to get upstreamed to its origin, the kustomize repo. 
+// https://pkg.go.dev/sigs.k8s.io/kustomize/kyaml/comments?tab=doc#CopyComments + +package comments + +import ( + "sigs.k8s.io/kustomize/kyaml/openapi" + "sigs.k8s.io/kustomize/kyaml/yaml" + "sigs.k8s.io/kustomize/kyaml/yaml/walk" +) + +// CopyComments recursively copies the comments on fields in from to fields in to +func CopyComments(from, to *yaml.RNode, moveCommentsTop bool) error { + // create the copier struct for the specified mode + c := &copier{moveCommentsTop, nil, make(map[int]trackedKey)} + + // copy over comments for the root tree(s) + c.copyFieldComments(from, to) + + // walk the fields copying comments + _, err := walk.Walker{ + Sources: []*yaml.RNode{from, to}, + Visitor: c, + VisitKeysAsScalars: true}.Walk() + + // restore lost comments to the top of the document, if applicable + if moveCommentsTop { + c.restoreLostComments(to) + } + + return err +} + +// copier implements walk.Visitor, and copies comments to fields shared between 2 instances +// of a resource +type copier struct { + // moveCommentsTop specifies whether to recover lost comments or not + moveCommentsTop bool + // if moveCommentsTop is true, this slice will be populated with lost comment entries while iterating + lostComments []lostComment + // if moveCommentsTop is true, this map will be populated with tracked YAML keys for lines while iterating + trackedKeys map[int]trackedKey +} + +func (c *copier) VisitMap(s walk.Sources, _ *openapi.ResourceSchema) (*yaml.RNode, error) { + c.copyFieldComments(s.Dest(), s.Origin()) + return s.Dest(), nil +} + +func (c *copier) VisitScalar(s walk.Sources, _ *openapi.ResourceSchema) (*yaml.RNode, error) { + to := s.Origin() + // TODO: File a bug with upstream yaml to handle comments for FoldedStyle scalar nodes + // Hack: convert FoldedStyle scalar node to DoubleQuotedStyle as the line comments are + // being serialized without space + // https://github.com/GoogleContainerTools/kpt/issues/766 + if to != nil && to.Document().Style == 
yaml.FoldedStyle {
+		to.Document().Style = yaml.DoubleQuotedStyle
+	}
+
+	c.copyFieldComments(s.Dest(), to)
+	return s.Dest(), nil
+}
+
+func (c *copier) VisitList(s walk.Sources, _ *openapi.ResourceSchema, _ walk.ListKind) (*yaml.RNode, error) {
+	c.copyFieldComments(s.Dest(), s.Origin())
+	destItems := s.Dest().Content()
+	originItems := s.Origin().Content()
+
+	for i := 0; i < len(destItems) && i < len(originItems); i++ {
+		dest := destItems[i]
+		origin := originItems[i]
+
+		if dest.Value == origin.Value {
+			c.copyFieldComments(yaml.NewRNode(dest), yaml.NewRNode(origin))
+		}
+	}
+
+	return s.Dest(), nil
+}
+
+// copyFieldComments copies the comment from one field to another
+func (c *copier) copyFieldComments(from, to *yaml.RNode) {
+	// If either from or to doesn't exist, return quickly
+	if from == nil || to == nil {
+
+		// If we asked for moving lost comments (i.e. if from is non-nil and to is nil),
+		// do it through the rememberLostComments function
+		if c.moveCommentsTop && from != nil && to == nil {
+			c.rememberLostComments(from)
+		}
+		return
+	}
+
+	if to.Document().LineComment == "" {
+		to.Document().LineComment = from.Document().LineComment
+	}
+	if to.Document().HeadComment == "" {
+		to.Document().HeadComment = from.Document().HeadComment
+	}
+	if to.Document().FootComment == "" {
+		to.Document().FootComment = from.Document().FootComment
+	}
+}
diff --git a/pkg/frame/sanitize/comments/comments_test.go b/pkg/frame/sanitize/comments/comments_test.go
new file mode 100644
index 00000000..dfd874bb
--- /dev/null
+++ b/pkg/frame/sanitize/comments/comments_test.go
@@ -0,0 +1,370 @@
+// Copyright 2019 The Kubernetes Authors.
+// SPDX-License-Identifier: Apache-2.0
+
+// This package provides a means to copy over comments between
+// two kyaml/yaml.RNode trees. This code is derived from
+// the sigs.k8s.io/kustomize/kyaml/comments package, at revision
+// 600d4f2c0bf174abd76d03e49939ee0c34b2a019.
+// +// It has been slightly modified and adapted to not lose any +// comment from the old tree, although the node the comment is +// attached to doesn't exist in the new tree. To solve this, +// this package moves any such comments to the beginning of the +// file. +// This file is a temporary means as long as we're waiting for +// these code changes to get upstreamed to its origin, the kustomize repo. +// https://pkg.go.dev/sigs.k8s.io/kustomize/kyaml/comments?tab=doc#CopyComments + +package comments + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +func TestCopyComments(t *testing.T) { + testCases := []struct { + name string + from string + to string + expected string + }{ + { + name: "copy_comments", + from: ` +# A +# +# B + +# C +apiVersion: apps/v1 +kind: Deployment +spec: # comment 1 + # comment 2 + replicas: 3 # comment 3 + # comment 4 +`, + to: ` +apiVersion: apps/v1 +kind: Deployment +spec: + replicas: 4 +`, + expected: ` +# A +# +# B + +# C +apiVersion: apps/v1 +kind: Deployment +spec: # comment 1 + # comment 2 + replicas: 4 # comment 3 + # comment 4 +`, + }, { + name: "associative_list", + from: ` +apiVersion: apps/v1 +kind: Deployment +spec: + template: + spec: + containers: + - name: foo + image: bar # comment 1 +`, + to: ` +apiVersion: apps/v1 +kind: Deployment +spec: + template: + spec: + containers: + - name: foo + image: bar +`, + expected: ` +apiVersion: apps/v1 +kind: Deployment +spec: + template: + spec: + containers: + - name: foo + image: bar # comment 1 +`, + }, { + name: "keep_comments", + from: ` +# A +# +# B + +# C +apiVersion: apps/v1 +kind: Deployment +spec: # comment 1 + # comment 2 + replicas: 3 # comment 3 + # comment 4 +`, + to: ` +apiVersion: apps/v1 +kind: Deployment +spec: + replicas: 4 # comment 5 +`, + expected: ` +# A +# +# B + +# C +apiVersion: apps/v1 +kind: Deployment +spec: # comment 1 + # comment 2 + replicas: 4 # comment 5 + # comment 4 +`, + }, { + 
name: "copy_item_comments", + from: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a # comment +`, + to: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a +`, + expected: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a # comment +`, + }, { + name: "copy_item_comments_2", + from: ` +apiVersion: apps/v1 +kind: Deployment +items: +# comment +- a +`, + to: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a +`, + expected: ` +apiVersion: apps/v1 +kind: Deployment +items: +# comment +- a +`, + }, { + name: "copy_item_comments_middle", + from: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a +- b # comment +- c +`, + to: ` +apiVersion: apps/v1 +kind: Deployment +items: +- d +- b +- e +`, + expected: ` +apiVersion: apps/v1 +kind: Deployment +items: +- d +- b # comment +- e +`, + }, { + name: "copy_item_comments_moved", + from: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a +- b # comment +- c +`, + to: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a +- c +- b +`, + expected: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a +- c +- b +`, + }, + { + name: "copy_item_comments_no_match", + from: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a # comment +`, + to: ` +apiVersion: apps/v1 +kind: Deployment +items: +- b +`, + expected: ` +apiVersion: apps/v1 +kind: Deployment +items: +- b +`, + }, { + name: "copy_item_comments_add", + from: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a # comment +`, + to: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a +- b +`, + expected: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a # comment +- b +`, + }, { + name: "copy_item_comments_remove", + from: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a # comment +- b +`, + to: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a +`, + expected: ` +apiVersion: apps/v1 +kind: Deployment +items: +- a # comment +`, + }, { + name: "copy_comments_folded_style", + from: ` +apiVersion: v1 +kind: ConfigMap +data: + somekey: 
"012345678901234567890123456789012345678901234567890123456789012345678901234" # x +`, + to: ` +apiVersion: v1 +kind: ConfigMap +data: + somekey: >- + 012345678901234567890123456789012345678901234567890123456789012345678901234 +`, + expected: ` +apiVersion: v1 +kind: ConfigMap +data: + somekey: "012345678901234567890123456789012345678901234567890123456789012345678901234" # x +`, + }, { + name: "copy_comments_move_to_top", + from: ` +# Top comment + +apiVersion: v1 +kind: ConfigMap # Foo +# Bar +data: + # Baz + somekey: "012345678901234567890123456789012345678901234567890123456789012345678901234" # x +`, + to: ` +apiVersion: v1 +`, + expected: ` +# Top comment +# Comments lost during file manipulation: +# Field "data": "Bar" +# Field "somekey": "Baz" +# Field "somekey": "x" +# Field "kind": "Foo" + +apiVersion: v1 +`, + }, + } + + for i := range testCases { + tc := testCases[i] + t.Run(tc.name, func(t *testing.T) { + from, err := yaml.Parse(tc.from) + if !assert.NoError(t, err) { + t.FailNow() + } + + to, err := yaml.Parse(tc.to) + if !assert.NoError(t, err) { + t.FailNow() + } + + err = CopyComments(from, to, true) + if !assert.NoError(t, err) { + t.FailNow() + } + + actual, err := to.String() + if !assert.NoError(t, err) { + t.FailNow() + } + + if !assert.Equal(t, strings.TrimSpace(tc.expected), strings.TrimSpace(actual)) { + t.FailNow() + } + }) + } +} diff --git a/pkg/frame/sanitize/comments/lost.go b/pkg/frame/sanitize/comments/lost.go new file mode 100644 index 00000000..f85fc1f7 --- /dev/null +++ b/pkg/frame/sanitize/comments/lost.go @@ -0,0 +1,118 @@ +package comments + +import ( + "fmt" + "strings" + + "sigs.k8s.io/kustomize/kyaml/yaml" +) + +// lostComment specifies a mapping between a fieldName (in the old structure), which doesn't exist in the +// new tree, and its related comment. 
It optionally specifies the line number of the comment, a positive +// line number is used to distinguish inline comments, which require special handling to resolve the +// correct field name, since they are attached to the value and not the key of a YAML key-value pair. +type lostComment struct { + fieldName string + comment string + line int +} + +// Since the YAML walker needs to visit all keys as scalar nodes, we have no way of distinguishing keys from +// values when trying to resolve the field names for inline comments. By tracking the leftmost key (lowest +// column value, be it a key or value) for each row, we can figure out the actual key for inline comments +// and not accidentally use a value as the field name, since keys are guaranteed to come before values. +type trackedKey struct { + name string + column int +} + +// trackKey compares the column position of the given node to the stored best (lowest) column position for the +// node's line and replaces the best if the given node is more likely to be a key (has a smaller column value). +func (c *copier) trackKey(node *yaml.Node) { + // If the given key doesn't have a smaller column value, return. + if key, ok := c.trackedKeys[node.Line]; ok { + if key.column < node.Column { + return + } + } + + // Store the new best tracked key for the line. + c.trackedKeys[node.Line] = trackedKey{ + name: node.Value, + column: node.Column, + } +} + +// parseComments parses the line, head and foot comments of the given node in this +// order and cleans them up (removes the potential "#" prefix and trims whitespace). 
+func parseComments(node *yaml.Node) (comments []string) { + for _, comment := range []string{node.LineComment, node.HeadComment, node.FootComment} { + comments = append(comments, strings.TrimSpace(strings.TrimPrefix(comment, "#"))) + } + + return +} + +// rememberLostComments goes through the comments attached to the 'from' node and adds +// them to the internal lostComments slice for usage after the tree walk. It also +// stores the line numbers for inline comments for resolving the correct field names. +func (c *copier) rememberLostComments(from *yaml.RNode) { + // Track the given node as a potential key for inline comments. + c.trackKey(from.Document()) + + // Get the field name, for head/foot comments this is the correct key, + // but for inline comments this resolves to the value of the field instead. + fieldName := from.Document().Value + comments := parseComments(from.Document()) + line := -1 // Don't store the line number of the comment by default, this is reserved for inline comments. + + for i, comment := range comments { + // If the line number is set (positive), an inline comment + // has been registered for this node and we can stop parsing. + if line >= 0 { + break + } + + // Do not store blank comment entries (nonexistent comments). + if len(comment) == 0 { + continue + } + + if i == 0 { + // If this node has an inline comment, store its line + // number for resolving the correct field name later. + line = from.Document().Line + } + + // Append the lost comment to the slice of copier. + c.lostComments = append(c.lostComments, lostComment{ + fieldName: fieldName, + comment: comment, + line: line, + }) + } +} + +// restoreLostComments writes the cached lost comments to the top of the to YAML tree. +// If it encounters inline comments, it will check the cached tracked keys for the +// best key for the line on which the comment resided. If no key is found for some +// reason, it will use the stored field name (the field value) as the key. 
+func (c *copier) restoreLostComments(to *yaml.RNode) {
+	for i, lc := range c.lostComments {
+		if i == 0 {
+			to.Document().HeadComment += "\nComments lost during file manipulation:"
+		}
+
+		fieldName := lc.fieldName
+		if lc.line >= 0 {
+			// This is an inline comment, resolve the field name from the tracked keys.
+			if key, ok := c.trackedKeys[lc.line]; ok {
+				fieldName = key.name
+			}
+		}
+
+		to.Document().HeadComment += fmt.Sprintf("\n# Field %q: %q", fieldName, lc.comment)
+	}
+
+	to.Document().HeadComment = strings.TrimPrefix(to.Document().HeadComment, "\n")
+}
diff --git a/pkg/frame/sanitize/sanitize.go b/pkg/frame/sanitize/sanitize.go
new file mode 100644
index 00000000..d6e42a98
--- /dev/null
+++ b/pkg/frame/sanitize/sanitize.go
@@ -0,0 +1,220 @@
+package sanitize
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"strings"
+
+	"github.com/weaveworks/libgitops/pkg/content"
+	"github.com/weaveworks/libgitops/pkg/frame/sanitize/comments"
+	"k8s.io/utils/pointer"
+	"sigs.k8s.io/kustomize/kyaml/kio"
+	"sigs.k8s.io/kustomize/kyaml/yaml"
+)
+
+// Sanitizer is an interface for sanitizing frames. Note that a sanitizer can only do
+// its work correctly if frame actually only contains one frame within.
+type Sanitizer interface {
+	// Sanitize sanitizes the frame in a standardized way for the given
+	// FramingType. If the FramingType isn't known, the Sanitizer can choose between
+	// returning an ErrUnsupportedFramingType error or just returning frame, nil unmodified.
+	// If ErrUnsupportedFramingType is returned, the consumer won't probably be able to handle
+	// other framing types than the default ones, which might not be desired.
+	//
+	// The returned frame should have len == 0 if it's considered empty.
+	Sanitize(ctx context.Context, ct content.ContentType, frame []byte) ([]byte, error)
+
+	content.ContentTypeSupporter
+}
+
+// NewJSONYAML returns a Sanitizer implementing frame sanitation for JSON and YAML.
+// +// For YAML it removes unnecessary "---" separators, whitespace and newlines. +// The YAML frame always ends with a newline, unless the sanitized YAML was an empty string, in which +// case an empty string with len == 0 will be returned. +// +// For JSON it sanitizes the JSON frame by removing unnecessary spaces and newlines around it. +func NewJSONYAML(opts ...JSONYAMLOption) Sanitizer { + return &defaultSanitizer{defaultJSONYAMLOptions().applyOptions(opts)} +} + +func WithCompactIndent() JSONYAMLOption { + return WithSpacesIndent(0) +} + +func WithSpacesIndent(spaces uint8) JSONYAMLOption { + i := strings.Repeat(" ", int(spaces)) + return &jsonYAMLOptions{Indentation: &i} +} + +func WithTabsIndent(tabs uint8) JSONYAMLOption { + i := strings.Repeat("\t", int(tabs)) + return &jsonYAMLOptions{Indentation: &i} +} + +func WithCompactSeqIndent() JSONYAMLOption { + return &jsonYAMLOptions{ForceSeqIndentStyle: yaml.CompactSequenceStyle} +} + +func WithWideSeqIndent() JSONYAMLOption { + return &jsonYAMLOptions{ForceSeqIndentStyle: yaml.WideSequenceStyle} +} + +func WithNoCommentsCopy() JSONYAMLOption { + return &jsonYAMLOptions{CopyComments: pointer.Bool(false)} +} + +type JSONYAMLOption interface { + applyToJSONYAML(*jsonYAMLOptions) +} + +type jsonYAMLOptions struct { + Indentation *string + // Only applicable to YAML; either yaml.CompactSequenceStyle or yaml.WideSequenceStyle + ForceSeqIndentStyle yaml.SequenceIndentStyle + + CopyComments *bool +} + +func defaultJSONYAMLOptions() *jsonYAMLOptions { + return (&jsonYAMLOptions{ + Indentation: pointer.String(""), + CopyComments: pointer.Bool(true), + }) +} + +func (o *jsonYAMLOptions) applyToJSONYAML(target *jsonYAMLOptions) { + if o.Indentation != nil { + target.Indentation = o.Indentation + } + if len(o.ForceSeqIndentStyle) != 0 { + target.ForceSeqIndentStyle = o.ForceSeqIndentStyle + } + if o.CopyComments != nil { + target.CopyComments = o.CopyComments + } +} + +func (o *jsonYAMLOptions) applyOptions(opts 
[]JSONYAMLOption) *jsonYAMLOptions { + for _, opt := range opts { + opt.applyToJSONYAML(o) + } + return o +} + +type defaultSanitizer struct { + opts *jsonYAMLOptions +} + +func (s *defaultSanitizer) Sanitize(ctx context.Context, ct content.ContentType, frame []byte) ([]byte, error) { + switch ct { + case content.ContentTypeYAML: + return s.handleYAML(ctx, frame) + case content.ContentTypeJSON: + return s.handleJSON(frame) + default: + // Just passthrough + return frame, nil + } +} + +func (defaultSanitizer) SupportedContentTypes() content.ContentTypes { + return []content.ContentType{content.ContentTypeYAML, content.ContentTypeJSON} +} + +var ErrTooManyFrames = errors.New("too many frames") + +func (s *defaultSanitizer) handleYAML(ctx context.Context, frame []byte) ([]byte, error) { + // Get prior data, if any (from the context), that we'll use to copy comments over and + // infer the sequence indenting style. + priorData, hasPriorData := GetPriorData(ctx) + + // Parse the current node + frameNodes, err := (&kio.ByteReader{ + Reader: bytes.NewReader(append([]byte{'\n'}, frame...)), + DisableUnwrapping: true, + OmitReaderAnnotations: true, + }).Read() + if err != nil { + return nil, err + } + if len(frameNodes) == 0 { + return []byte{}, nil + } else if len(frameNodes) != 1 { + return nil, ErrTooManyFrames + } + frameNode := frameNodes[0] + + if hasPriorData && s.opts.CopyComments != nil && *s.opts.CopyComments { + priorNode, err := yaml.Parse(string(priorData)) + if err != nil { + return nil, err + } + // Copy comments over + if err := comments.CopyComments(priorNode, frameNode, true); err != nil { + return nil, err + } + } + + return yaml.MarshalWithOptions(frameNode.Document(), &yaml.EncoderOptions{ + SeqIndent: s.resolveSeqStyle(frame, priorData, hasPriorData), + }) +} + +func (s *defaultSanitizer) resolveSeqStyle(frame, priorData []byte, hasPriorData bool) yaml.SequenceIndentStyle { + // If specified, use these; can be used as "force-formatting" directives for 
consistency + if len(s.opts.ForceSeqIndentStyle) != 0 { + return s.opts.ForceSeqIndentStyle + } + // Otherwise, autodetect the indentation from prior data, if exists, or the current frame + // If the sequence style cannot be derived; the compact form will be used + var deriveYAML string + if hasPriorData { + deriveYAML = string(priorData) + } else { + deriveYAML = string(frame) + } + return yaml.SequenceIndentStyle(yaml.DeriveSeqIndentStyle(deriveYAML)) +} + +func (s *defaultSanitizer) handleJSON(frame []byte) ([]byte, error) { + // If it's all whitespace, just return an empty byte array, no actual content here + if len(bytes.TrimSpace(frame)) == 0 { + return []byte{}, nil + } + var buf bytes.Buffer + var err error + if s.opts.Indentation == nil || len(*s.opts.Indentation) == 0 { + err = json.Compact(&buf, frame) + } else { + err = json.Indent(&buf, frame, "", *s.opts.Indentation) + } + if err != nil { + return nil, err + } + // Trim all other spaces than an ending newline + return append(bytes.TrimSpace(buf.Bytes()), '\n'), nil +} + +func IfSupported(ctx context.Context, s Sanitizer, ct content.ContentType, frame []byte) ([]byte, error) { + // If the content type isn't supported, nothing to do + if s == nil || !s.SupportedContentTypes().Has(ct) { + return frame, nil + } + return s.Sanitize(ctx, ct, frame) +} + +func WithPriorData(ctx context.Context, frame []byte) context.Context { + return context.WithValue(ctx, priorDataKey, frame) +} + +func GetPriorData(ctx context.Context) ([]byte, bool) { + b, ok := ctx.Value(priorDataKey).([]byte) + return b, ok +} + +type priorDataKeyStruct struct{} + +var priorDataKey = priorDataKeyStruct{} diff --git a/pkg/frame/sanitize/sanitize_test.go b/pkg/frame/sanitize/sanitize_test.go new file mode 100644 index 00000000..cb8682a3 --- /dev/null +++ b/pkg/frame/sanitize/sanitize_test.go @@ -0,0 +1,460 @@ +package sanitize + +import ( + "context" + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + 
"github.com/weaveworks/libgitops/pkg/content" +) + +func Test_defaultSanitizer_Sanitize(t *testing.T) { + tests := []struct { + name string + opts []JSONYAMLOption + ct content.ContentType + prior string + frame string + want string + wantErr error + checkErr func(error) bool + }{ + { + name: "passthrough whatever", + ct: content.ContentType("unknown"), + frame: "{randomdata:", + want: "{randomdata:", + }, + { + name: "default compact", + ct: content.ContentTypeJSON, + frame: `{ + "foo": { + "bar": "baz" + } + }`, + opts: []JSONYAMLOption{}, + want: `{"foo":{"bar":"baz"}} +`, + }, + { + name: "with two spaces", + ct: content.ContentTypeJSON, + frame: ` { "foo" : "bar" } +`, + opts: []JSONYAMLOption{WithSpacesIndent(2)}, + want: `{ + "foo": "bar" +} +`, + }, + { + name: "with four spaces", + ct: content.ContentTypeJSON, + frame: ` { "foo" : {"bar": "baz"} } +`, + opts: []JSONYAMLOption{WithSpacesIndent(4)}, + want: `{ + "foo": { + "bar": "baz" + } +} +`, + }, + { + name: "with tab indent", + ct: content.ContentTypeJSON, + frame: ` { "foo" : {"bar": "baz"} } +`, + opts: []JSONYAMLOption{WithTabsIndent(1)}, + want: `{ + "foo": { + "bar": "baz" + } +} +`, + }, + { + name: "with malformed", + ct: content.ContentTypeJSON, + frame: `{"foo":"`, + opts: []JSONYAMLOption{WithCompactIndent()}, + checkErr: func(err error) bool { + _, ok := err.(*json.SyntaxError) + return ok + }, + }, + { + name: "only whitespace", + ct: content.ContentTypeJSON, + frame: ` + + `, + want: "", + }, + { + name: "no json", + ct: content.ContentTypeJSON, + frame: "", + want: "", + }, + { + name: "weird empty formatting", + ct: content.ContentTypeYAML, + frame: ` +--- + + + `, + want: "", + }, + { + name: "no yaml", + ct: content.ContentTypeYAML, + frame: "", + want: "", + }, + { + name: "too many frames", + ct: content.ContentTypeYAML, + frame: `aa: true +--- +bb: false +`, + wantErr: ErrTooManyFrames, + }, + { + name: "make sure lists are not expanded", + ct: content.ContentTypeYAML, + frame: `--- 
+kind: List +apiVersion: "v1" +items: +- name: 123 +- name: 456 +`, + want: `kind: List +apiVersion: "v1" +items: +- name: 123 +- name: 456 +`, + }, + { + name: "yaml format; don't be confused by the bar commend", + ct: content.ContentTypeYAML, + frame: `--- + +kind: List +# foo +apiVersion: "v1" +items: + # bar +- name: 123 + +`, + want: `kind: List +# foo +apiVersion: "v1" +items: +# bar +- name: 123 +`, + }, + { + name: "detect indentation; don't be confused by the bar commend", + ct: content.ContentTypeYAML, + frame: `--- + +kind: List +# foo +apiVersion: "v1" +items: +# bar + - name: 123 + +`, + want: `kind: List +# foo +apiVersion: "v1" +items: + # bar + - name: 123 +`, + }, + { + name: "force compact", + ct: content.ContentTypeYAML, + opts: []JSONYAMLOption{WithCompactSeqIndent()}, + frame: `--- + +kind: List +# foo +apiVersion: "v1" +items: + # bar + - name: 123 + +`, + want: `kind: List +# foo +apiVersion: "v1" +items: +# bar +- name: 123 +`, + }, + { + name: "force wide", + ct: content.ContentTypeYAML, + opts: []JSONYAMLOption{WithWideSeqIndent()}, + frame: `--- + +kind: List +# foo +apiVersion: "v1" +items: +# bar +- name: 123 + +`, + want: `kind: List +# foo +apiVersion: "v1" +items: + # bar + - name: 123 +`, + }, + { + name: "invalid indentation", + ct: content.ContentTypeYAML, + frame: `--- + +kind: "foo" + bar: true`, + checkErr: func(err error) bool { + return err.Error() == "yaml: line 1: did not find expected key" + }, + }, + { + name: "infer seq style from prior; default is compact", + ct: content.ContentTypeYAML, + opts: []JSONYAMLOption{}, + prior: `# root +# no lists here to look at + +kind: List # foo +# bla +apiVersion: v1 +`, + frame: `--- +kind: List +apiVersion: v1 +items: + - item1 # hello + - item2 +`, + want: `# root +# no lists here to look at + +kind: List # foo +# bla +apiVersion: v1 +items: +- item1 # hello +- item2 +`, + }, + { + name: "copy comments; infer seq style from prior", + ct: content.ContentTypeYAML, + opts: 
[]JSONYAMLOption{}, + prior: `# root +# hello + +kind: List # foo +# bla +apiVersion: v1 +notexist: foo # remember me! + +items: +# ignoreme + - item1 # hello + # bla + - item2 # hi + # after`, + frame: `--- +kind: List +apiVersion: v1 +fruits: +- fruit1 +items: +- item1 +- item2 +- item3 +`, + want: `# root +# hello +# Comments lost during file manipulation: +# Field "notexist": "remember me!" + +kind: List # foo +# bla +apiVersion: v1 +fruits: + - fruit1 +items: + # ignoreme + - item1 # hello + # bla + - item2 # hi + # after + + - item3 +`, + }, + { + name: "don't copy comments; infer from prior", + ct: content.ContentTypeYAML, + opts: []JSONYAMLOption{WithNoCommentsCopy()}, + prior: `# root +# hello + +kind: List # foo +# bla +apiVersion: v1 +notexist: foo # remember me! + +items: +# ignoreme +- item1 # hello + # bla + - item2 # trying to trick the system; but it should make style choice based on item1 + # after`, + frame: `--- +kind: List +apiVersion: v1 +fruits: +- fruit1 # new +items: # new +- item1 +- item2 +# new +- item3 +`, + want: `kind: List +apiVersion: v1 +fruits: +- fruit1 # new +items: # new +- item1 +- item2 +# new +- item3 +`, + }, + { + name: "invalid prior", + ct: content.ContentTypeYAML, + prior: `# root +# hello + +kind: List # foo +# bla +apiVersion: v1 +notexist: foo # remember me! 
+ +items: +# ignoreme + - item1 # hello + # bla +- item2 # trying to trick the system; but it should make style choice based on item1 + # after`, + frame: `--- +kind: List +apiVersion: v1 +fruits: +- fruit1 # new +items: # new +- item1 +- item2 +# new +- item3 +`, + checkErr: func(err error) bool { + return err.Error() == "yaml: line 3: did not find expected key" + }, + }, + { + name: "invalid copy comments; change from scalar to mapping node", + ct: content.ContentTypeYAML, + prior: `# root +foo: "bar" # baz`, + frame: ` +foo: + name: "bar" +`, + checkErr: func(err error) bool { + // from sigs.k8s.io/kustomize/kyaml/yaml/fns.go:728 + return err.Error() == `wrong Node Kind for expected: ScalarNode was MappingNode: value: {name: "bar"}` + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ctx := context.Background() + s := NewJSONYAML(tt.opts...) + if len(tt.prior) != 0 { + ctx = WithPriorData(ctx, []byte(tt.prior)) + } + got, err := s.Sanitize(ctx, tt.ct, []byte(tt.frame)) + assert.Equal(t, tt.want, string(got)) + if tt.checkErr != nil { + assert.True(t, tt.checkErr(err)) + } else { + assert.ErrorIs(t, err, tt.wantErr) + } + }) + } +} + +func TestIfSupported(t *testing.T) { + ctx := context.Background() + tests := []struct { + name string + s Sanitizer + ct content.ContentType + frame string + want string + wantErr bool + }{ + { + name: "nil sanitizer", + frame: "foo", + want: "foo", + }, + { + name: "unknown content type", + s: NewJSONYAML(), + ct: content.ContentType("unknown"), + frame: "foo", + want: "foo", + }, + { + name: "sanitize", + s: NewJSONYAML(WithCompactIndent()), + ct: content.ContentTypeJSON, + frame: ` { "foo" : true } `, + want: `{"foo":true} +`, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, _ := IfSupported(ctx, tt.s, tt.ct, []byte(tt.frame)) + assert.Equal(t, tt.want, string(got)) + }) + } +} diff --git a/pkg/frame/single.go b/pkg/frame/single.go new file mode 100644 index 
00000000..27470721 --- /dev/null +++ b/pkg/frame/single.go @@ -0,0 +1,48 @@ +package frame + +import ( + "context" + "io" + + "github.com/weaveworks/libgitops/pkg/content" +) + +func newSingleReader(r content.Reader, ct content.ContentType, o *singleReaderOptions) Reader { + // Make sure not more than this set of bytes can be read + r, _ = content.WrapLimited(r, o.MaxFrameSize) + return &singleReader{ + // TODO: Apply options? + MetadataContainer: r.ContentMetadata().Clone().ToContainer(), + ContentTyped: ct, + r: r, + } +} + +// singleReader implements reading a single frame (up to a certain limit) from an io.ReadCloser. +// It MUST be wrapped in a higher-level composite Reader like the highlevelReader to satisfy the +// Reader interface correctly. +type singleReader struct { + content.MetadataContainer + content.ContentTyped + r content.Reader + hasBeenRead bool +} + +// Read the whole frame from the underlying io.Reader, up to a given limit +func (r *singleReader) ReadFrame(ctx context.Context) ([]byte, error) { + if r.hasBeenRead { + // This really should never happen, because the higher-level Reader should ensure + // no more than one frame can be read from the downstream as opts.MaxFrameCount == 1. + return nil, io.EOF // TODO: What about the third time? 
+	}
+	// Mark that the frame has now been read (regardless of the result)
+	r.hasBeenRead = true
+	// Read the whole frame from the underlying io.Reader, up to a given amount
+	frame, err := io.ReadAll(r.r.WithContext(ctx))
+	if err != nil {
+		return nil, err
+	}
+	return frame, nil
+}
+
+func (r *singleReader) Close(ctx context.Context) error { return r.r.WithContext(ctx).Close() }
diff --git a/pkg/frame/utils.go b/pkg/frame/utils.go
new file mode 100644
index 00000000..ebf676c4
--- /dev/null
+++ b/pkg/frame/utils.go
@@ -0,0 +1,78 @@
+package frame
+
+import (
+	"context"
+	"errors"
+	"io"
+
+	"github.com/weaveworks/libgitops/pkg/tracing"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// List is a list of frames (byte arrays), used for convenience functions
+type List [][]byte
+
+// ListFromReader is a convenience method that constructs a List by reading
+// from the given Reader r until io.EOF. If another error than io.EOF is returned,
+// reading is aborted immediately and the error is returned.
+func ListFromReader(ctx context.Context, r Reader) (List, error) {
+	var f List
+	for {
+		// Read until we get io.EOF or an error
+		frame, err := r.ReadFrame(ctx)
+		if errors.Is(err, io.EOF) {
+			break
+		} else if err != nil {
+			return nil, err
+		}
+		// Append the frame to the returned list
+		f = append(f, frame)
+	}
+	return f, nil
+}
+
+func ListFromBytes(list ...[]byte) List { return list }
+
+// WriteTo is a convenience method that writes a set of frames to a Writer.
+// If an error occurs, writing stops and the error is returned.
+func (f List) WriteTo(ctx context.Context, fw Writer) error {
+	// Loop over all frames in the list, and write them individually to the Writer
+	for _, frame := range f {
+		if err := fw.WriteFrame(ctx, frame); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// ToIoWriteCloser transforms a Writer to an io.WriteCloser, by binding a relevant
+// context.Context to it. If err != nil, then n == 0. If err == nil, then n == len(frame).
+func ToIoWriteCloser(ctx context.Context, w Writer) io.WriteCloser { + return &ioWriterHelper{ctx, w} +} + +type ioWriterHelper struct { + ctx context.Context + parent Writer +} + +func (w *ioWriterHelper) Write(frame []byte) (n int, err error) { + if err := w.parent.WriteFrame(w.ctx, frame); err != nil { + return 0, err + } + return len(frame), nil +} +func (w *ioWriterHelper) Close() error { + return w.parent.Close(w.ctx) +} + +func closeWithTrace(ctx context.Context, c Closer, obj interface{}) error { + return tracing.FromContext(ctx, obj).TraceFunc(ctx, "Close", func(ctx context.Context, _ trace.Span) error { + return c.Close(ctx) + }).Register() +} + +// nopCloser returns nil when Close(ctx) is called +type nopCloser struct{} + +func (*nopCloser) Close(context.Context) error { return nil } diff --git a/pkg/frame/utils_test.go b/pkg/frame/utils_test.go new file mode 100644 index 00000000..9eb10cd5 --- /dev/null +++ b/pkg/frame/utils_test.go @@ -0,0 +1,119 @@ +package frame + +import ( + "bytes" + "context" + "io" + "io/fs" + "io/ioutil" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/compositeio" + "github.com/weaveworks/libgitops/pkg/util/limitedio" +) + +type rawCloserExposer interface { + RawCloser() io.Closer +} + +func TestFromConstructors(t *testing.T) { + yamlPath := filepath.Join(t.TempDir(), "foo.yaml") + str := "foo: bar\n" + byteContent := []byte(str) + err := ioutil.WriteFile(yamlPath, byteContent, 0644) + require.Nil(t, err) + + ctx := tracing.BackgroundTracingContext() + // FromYAMLFile -- found + got, err := FromYAMLFile(yamlPath).ReadFrame(ctx) + assert.Nil(t, err) + assert.Equal(t, str, string(got)) + // content.FromFile -- already closed + f := content.FromFile(yamlPath) + (f.(rawCloserExposer)).RawCloser().Close() // deliberately close the 
file before giving it to the reader + got, err = NewYAMLReader(f).ReadFrame(ctx) + assert.ErrorIs(t, err, fs.ErrClosed) + assert.Empty(t, got) + // FromYAMLFile -- not found + got, err = FromYAMLFile(filepath.Join(t.TempDir(), "notexist.yaml")).ReadFrame(ctx) + assert.ErrorIs(t, err, fs.ErrNotExist) + assert.Empty(t, got) + // FromYAMLBytes + got, err = FromYAMLBytes(byteContent).ReadFrame(ctx) + assert.Nil(t, err) + assert.Equal(t, byteContent, got) + // FromYAMLString + got, err = FromYAMLString(str).ReadFrame(ctx) + assert.Nil(t, err) + assert.Equal(t, str, string(got)) + assert.Nil(t, tracing.ForceFlushGlobal(ctx, 0)) +} + +func TestToIoWriteCloser(t *testing.T) { + var buf bytes.Buffer + closeRec := &recordingCloser{} + cw := content.NewWriter(compositeio.WriteCloser(&buf, closeRec)) + w := NewYAMLWriter(cw, SingleOptions{MaxFrameSize: limitedio.Limit(testYAMLlen)}) + ctx := tracing.BackgroundTracingContext() + iow := ToIoWriteCloser(ctx, w) + + byteContent := []byte(testYAML) + n, err := iow.Write(byteContent) + assert.Len(t, byteContent, n) + assert.Nil(t, err) + + // Check closing forwarding + assert.Nil(t, iow.Close()) + assert.Equal(t, 1, closeRec.count) + + // Try writing again + overflowContent := []byte(testYAML + testYAML) + n, err = iow.Write(overflowContent) + assert.Equal(t, 0, n) + assert.ErrorIs(t, err, &limitedio.ReadSizeOverflowError{}) + // Assume the writer has been closed only once + assert.Equal(t, 1, closeRec.count) + assert.Equal(t, buf.String(), yamlSep+string(byteContent)) + + assert.Nil(t, tracing.ForceFlushGlobal(context.Background(), 0)) +} + +func TestListFromReader(t *testing.T) { + ctx := tracing.BackgroundTracingContext() + // Happy case + fr, err := ListFromReader(ctx, FromYAMLString(messyYAML)) + assert.Equal(t, List{[]byte(testYAML), []byte(testYAML)}, fr) + assert.Nil(t, err) + + // Non-happy case + r := NewJSONReader(content.FromString(testJSON2), SingleOptions{MaxFrameSize: limitedio.Limit(testJSONlen - 1)}) + fr, err = 
ListFromReader(ctx, r) + assert.Len(t, fr, 0) + assert.ErrorIs(t, err, &limitedio.ReadSizeOverflowError{}) + assert.Nil(t, tracing.ForceFlushGlobal(ctx, 0)) +} + +func TestList_WriteTo(t *testing.T) { + var buf bytes.Buffer + // TODO: Automatically get the name of the writer passed in, to avoid having to name + // everything. i.e. content.NewWriterName(string, io.Writer) + cw := content.NewWriter(&buf) + w := NewYAMLWriter(cw) + ctx := context.Background() + // Happy case + err := ListFromBytes([]byte(testYAML), []byte(testYAML)).WriteTo(ctx, w) + assert.Equal(t, buf.String(), yamlSep+testYAML+yamlSep+testYAML) + assert.Nil(t, err) + + // Non-happy case + buf.Reset() + w = NewJSONWriter(cw, SingleOptions{MaxFrameSize: limitedio.Limit(testJSONlen)}) + err = ListFromBytes([]byte(testJSON), []byte(testJSON2)).WriteTo(ctx, w) + assert.Equal(t, buf.String(), testJSON) + assert.ErrorIs(t, err, &limitedio.ReadSizeOverflowError{}) +} diff --git a/pkg/frame/writer.go b/pkg/frame/writer.go new file mode 100644 index 00000000..5a6f93fc --- /dev/null +++ b/pkg/frame/writer.go @@ -0,0 +1,76 @@ +package frame + +import ( + "context" + "sync" + + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame/sanitize" + "github.com/weaveworks/libgitops/pkg/tracing" + "github.com/weaveworks/libgitops/pkg/util/limitedio" + "go.opentelemetry.io/otel/trace" +) + +func newHighlevelWriter(w Writer, opts *writerOptions) Writer { + return &highlevelWriter{ + writer: w, + writerMu: &sync.Mutex{}, + opts: opts, + } +} + +type highlevelWriter struct { + writer Writer + writerMu *sync.Mutex + opts *writerOptions + // frameCount counts the amount of successful frames written + frameCount int64 +} + +func (w *highlevelWriter) WriteFrame(ctx context.Context, frame []byte) error { + w.writerMu.Lock() + defer w.writerMu.Unlock() + + return tracing.FromContext(ctx, w).TraceFunc(ctx, "WriteFrame", func(ctx context.Context, span trace.Span) error { + // Refuse to write 
too large frames + if w.opts.MaxFrameSize.IsLessThan(int64(len(frame))) { + return limitedio.ErrReadSizeOverflow(w.opts.MaxFrameSize) + } + // Refuse to write more than the maximum amount of frames + if w.opts.MaxFrameCount.IsLessThanOrEqual(w.frameCount) { + return ErrFrameCountOverflow(w.opts.MaxFrameCount) + } + + // Sanitize the frame + // TODO: Maybe create a composite writer that actually reads the given frame first, to + // fully sanitize/validate it, and first then write the frames out using the writer? + frame, err := sanitize.IfSupported(ctx, w.opts.Sanitizer, w.ContentType(), frame) + if err != nil { + return err + } + + // Register the amount of (sanitized) bytes and call the underlying Writer + span.SetAttributes(content.SpanAttrByteContent(frame)...) + + // Catch empty frames + if len(frame) == 0 { + return nil + } + + err = w.writer.WriteFrame(ctx, frame) + + // Increase the frame counter, if the write was successful + if err == nil { + w.frameCount += 1 + } + return err + }).Register() +} + +func (w *highlevelWriter) ContentType() content.ContentType { return w.writer.ContentType() } +func (w *highlevelWriter) Close(ctx context.Context) error { + return closeWithTrace(ctx, w.writer, w) +} + +// Just forward the metadata, don't do anything specific with it +func (w *highlevelWriter) ContentMetadata() content.Metadata { return w.writer.ContentMetadata() } diff --git a/pkg/frame/writer_delegate.go b/pkg/frame/writer_delegate.go new file mode 100644 index 00000000..fa968e97 --- /dev/null +++ b/pkg/frame/writer_delegate.go @@ -0,0 +1,58 @@ +package frame + +import ( + "context" + "io" + + "github.com/weaveworks/libgitops/pkg/content" +) + +func newDelegatingWriter(ct content.ContentType, w content.Writer) Writer { + return &delegatingWriter{ + // TODO: Register options? 
+ MetadataContainer: w.ContentMetadata().Clone().ToContainer(), + ContentTyped: ct, + w: w, + } +} + +// delegatingWriter is an implementation of the Writer interface +type delegatingWriter struct { + content.MetadataContainer + content.ContentTyped + w content.Writer +} + +func (w *delegatingWriter) WriteFrame(ctx context.Context, frame []byte) error { + // Write the frame to the underlying writer + n, err := w.w.WithContext(ctx).Write(frame) + // Guard against short writes + return catchShortWrite(n, err, frame) +} + +func (w *delegatingWriter) Close(ctx context.Context) error { return w.w.WithContext(ctx).Close() } + +func newErrWriter(ct content.ContentType, err error, meta content.Metadata) Writer { + return &errWriter{ + meta.Clone().ToContainer(), + ct, + &nopCloser{}, + err, + } +} + +type errWriter struct { + content.MetadataContainer + content.ContentTyped + Closer + err error +} + +func (w *errWriter) WriteFrame(context.Context, []byte) error { return w.err } + +func catchShortWrite(n int, err error, frame []byte) error { + if n < len(frame) && err == nil { + err = io.ErrShortWrite + } + return err +} diff --git a/pkg/frame/writer_factory.go b/pkg/frame/writer_factory.go new file mode 100644 index 00000000..1191648c --- /dev/null +++ b/pkg/frame/writer_factory.go @@ -0,0 +1,50 @@ +package frame + +import ( + "io" + + "github.com/weaveworks/libgitops/pkg/content" + "k8s.io/apimachinery/pkg/runtime/serializer/json" +) + +func (defaultFactory) NewWriter(ct content.ContentType, w content.Writer, opts ...WriterOption) Writer { + o := defaultWriterOptions().applyOptions(opts) + + var lowlevel Writer + switch ct { + case content.ContentTypeYAML: + lowlevel = newDelegatingWriter(content.ContentTypeYAML, w.Wrap(func(underlying io.WriteCloser) io.Writer { + // This writer always prepends a "---" before each frame + return json.YAMLFramer.NewFrameWriter(underlying) + })) + case content.ContentTypeJSON: + // JSON documents are self-framing; hence, no need to wrap 
the writer in any way + lowlevel = newDelegatingWriter(content.ContentTypeJSON, w) + default: + return newErrWriter(ct, content.ErrUnsupportedContentType(ct), w.ContentMetadata()) + } + return newHighlevelWriter(lowlevel, o) +} + +func (defaultFactory) NewSingleWriter(ct content.ContentType, w content.Writer, opts ...SingleWriterOption) Writer { + o := defaultSingleWriterOptions().applyOptions(opts) + + return newHighlevelWriter(newDelegatingWriter(ct, w), &writerOptions{ + Options: Options{ + SingleOptions: o.SingleOptions, + MaxFrameCount: 1, + }, + }) +} + +func (f defaultFactory) NewRecognizingWriter(w content.Writer, opts ...RecognizingWriterOption) Writer { + o := defaultRecognizingWriterOptions().applyOptions(opts) + + // Recognize the content type using the given recognizer + r, ct, err := content.NewRecognizingWriter(w, o.Recognizer) + if err != nil { + return newErrWriter("", err, r.ContentMetadata()) + } + // Re-use the logic of the "main" Writer constructor; validate ct there + return f.NewWriter(ct, w, o) +} diff --git a/pkg/frame/writer_test.go b/pkg/frame/writer_test.go new file mode 100644 index 00000000..80281407 --- /dev/null +++ b/pkg/frame/writer_test.go @@ -0,0 +1,34 @@ +package frame + +import ( + "bytes" + "context" + "io" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/weaveworks/libgitops/pkg/content" +) + +func TestNewWriter_Unrecognized(t *testing.T) { + fr := DefaultFactory().NewWriter(content.ContentType("doesnotexist"), content.NewWriter(io.Discard)) + ctx := context.Background() + err := fr.WriteFrame(ctx, make([]byte, 1)) + assert.ErrorIs(t, err, &content.UnsupportedContentTypeError{}) +} + +func TestWriterShortBuffer(t *testing.T) { + var buf bytes.Buffer + w := &halfWriter{&buf} + ctx := context.Background() + err := NewYAMLWriter(content.NewWriter(w)).WriteFrame(ctx, []byte("foo: bar")) + assert.Equal(t, io.ErrShortWrite, err) +} + +type halfWriter struct { + w io.Writer +} + +func (w *halfWriter) Write(p 
[]byte) (int, error) { + return w.w.Write(p[0 : (len(p)+1)/2]) +} diff --git a/pkg/tracing/logging.go b/pkg/tracing/logging.go new file mode 100644 index 00000000..f4f8269d --- /dev/null +++ b/pkg/tracing/logging.go @@ -0,0 +1,133 @@ +package tracing + +import ( + "context" + + "github.com/go-logr/logr" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/trace" + ctrllog "sigs.k8s.io/controller-runtime/pkg/log" +) + +func NewLoggingTracerProvider(tp trace.TracerProvider) SDKTracerProvider { + return &loggingTracerProvider{tp} +} + +type loggingTracerProvider struct { + tp trace.TracerProvider +} + +func (tp *loggingTracerProvider) Tracer(instrumentationName string, opts ...trace.TracerOption) trace.Tracer { + tracer := tp.tp.Tracer(instrumentationName, opts...) + return &loggingTracer{provider: tp, tracer: tracer, name: instrumentationName} +} + +func (tp *loggingTracerProvider) Shutdown(ctx context.Context) error { + p, ok := tp.tp.(SDKTracerProvider) + if !ok { + return nil + } + return p.Shutdown(ctx) +} + +func (tp *loggingTracerProvider) ForceFlush(ctx context.Context) error { + p, ok := tp.tp.(SDKTracerProvider) + if !ok { + return nil + } + return p.ForceFlush(ctx) +} + +type loggingTracer struct { + provider trace.TracerProvider + tracer trace.Tracer + name string +} + +func (t *loggingTracer) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + log := ctrllog.FromContext(ctx).WithName(t.name) //.WithValues(spanNameKey, spanName) + spanCfg := trace.NewSpanStartConfig(opts...) + startLog := log + if len(spanCfg.Attributes()) != 0 { + startLog = startLog.WithValues(spanAttributesKey, spanCfg.Attributes()) + } + startLog.Info("starting span") + + ctx, span := t.tracer.Start(ctx, spanName, opts...) 
+ logSpan := &loggingSpan{t.provider, log, span, spanName} + ctx = trace.ContextWithSpan(ctx, logSpan) + return ctx, logSpan +} + +type loggingSpan struct { + provider trace.TracerProvider + log logr.Logger + span trace.Span + spanName string +} + +const ( + spanNameKey = "span-name" + spanEventKey = "span-event" + spanStatusCodeKey = "span-status-code" + spanStatusDescriptionKey = "span-status-description" + spanAttributesKey = "span-attributes" +) + +func (s *loggingSpan) End(options ...trace.SpanEndOption) { + s.log.Info("ending span") + s.span.End(options...) +} + +// AddEvent adds an event with the provided name and options. +func (s *loggingSpan) AddEvent(name string, options ...trace.EventOption) { + s.log.Info("recorded span event", spanEventKey, name) + s.span.AddEvent(name, options...) +} + +// IsRecording returns the recording state of the Span. It will return +// true if the Span is active and events can be recorded. +func (s *loggingSpan) IsRecording() bool { return s.span.IsRecording() } + +// RecordError will record err as an exception span event for this span. An +// additional call to SetStatus is required if the Status of the Span should +// be set to Error, as this method does not change the Span status. If this +// span is not being recorded or err is nil then this method does nothing. +func (s *loggingSpan) RecordError(err error, options ...trace.EventOption) { + s.log.Error(err, "recorded span error") + s.span.RecordError(err, options...) +} + +// SpanContext returns the SpanContext of the Span. The returned SpanContext +// is usable even after the End method has been called for the Span. +func (s *loggingSpan) SpanContext() trace.SpanContext { return s.span.SpanContext() } + +// SetStatus sets the status of the Span in the form of a code and a +// description, overriding previous values set. The description is only +// included in a status when the code is for an error. 
+func (s *loggingSpan) SetStatus(code codes.Code, description string) { + s.log.Info("recorded span status change", + spanStatusCodeKey, code.String(), + spanStatusDescriptionKey, description) + s.span.SetStatus(code, description) +} + +// SetName sets the Span name. +func (s *loggingSpan) SetName(name string) { + s.log.Info("recorded span name change", spanNameKey, name) + s.log = s.log.WithValues(spanNameKey, name) + s.span.SetName(name) +} + +// SetAttributes sets kv as attributes of the Span. If a key from kv +// already exists for an attribute of the Span it will be overwritten with +// the value contained in kv. +func (s *loggingSpan) SetAttributes(kv ...attribute.KeyValue) { + s.log.Info("recorded span attribute change", spanAttributesKey, kv) + s.span.SetAttributes(kv...) +} + +// TracerProvider returns a TracerProvider that can be used to generate +// additional Spans on the same telemetry pipeline as the current Span. +func (s *loggingSpan) TracerProvider() trace.TracerProvider { return s.provider } diff --git a/pkg/tracing/tracer_provider.go b/pkg/tracing/tracer_provider.go new file mode 100644 index 00000000..622b6d30 --- /dev/null +++ b/pkg/tracing/tracer_provider.go @@ -0,0 +1,248 @@ +package tracing + +import ( + "context" + "errors" + "time" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/exporters/jaeger" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.4.0" + "go.opentelemetry.io/otel/trace" + "go.uber.org/multierr" +) + +type SDKTracerProvider interface { + trace.TracerProvider + Shutdown(ctx context.Context) error + ForceFlush(ctx context.Context) error +} + +// NewBuilder returns a new TracerProviderBuilder instance. 
+func NewBuilder() TracerProviderBuilder { + return &builder{} +} + +// TracerProviderBuilder is a builder for a TracerProviderWithShutdown. +type TracerProviderBuilder interface { + // RegisterInsecureOTelExporter registers an exporter to an OpenTelemetry Collector on the + // given address, which defaults to "localhost:55680" if addr is empty. The OpenTelemetry + // Collector speaks gRPC, hence, don't add any "http(s)://" prefix to addr. The OpenTelemetry + // Collector is just a proxy, it in turn can forward e.g. traces to Jaeger and metrics to + // Prometheus. Additional options can be supplied that can override the default behavior. + RegisterInsecureOTelExporter(ctx context.Context, addr string, opts ...otlptracegrpc.Option) TracerProviderBuilder + + // RegisterInsecureJaegerExporter registers an exporter to Jaeger using Jaeger's own HTTP API. + // The default address is "http://localhost:14268/api/traces" if addr is left empty. + // Additional options can be supplied that can override the default behavior. + RegisterInsecureJaegerExporter(addr string, opts ...jaeger.CollectorEndpointOption) TracerProviderBuilder + + // RegisterStdoutExporter exports pretty-formatted telemetry data to os.Stdout, or another writer if + // stdouttrace.WithWriter(w) is supplied as an option. Note that stdouttrace.WithoutTimestamps() doesn't + // work due to an upstream bug in OpenTelemetry. TODO: Fix that issue upstream. + RegisterStdoutExporter(opts ...stdouttrace.Option) TracerProviderBuilder + + // WithOptions allows configuring the TracerProvider in various ways, e.g. tracesdk.WithSpanProcessor(sp) + // or tracesdk.WithIDGenerator() + WithOptions(opts ...tracesdk.TracerProviderOption) TracerProviderBuilder + + // WithAttributes allows registering more default attributes for traces created by this TracerProvider. + // By default semantic conventions of version v1.4.0 are used, with "service.name" => "libgitops". 
+ WithAttributes(attrs ...attribute.KeyValue) TracerProviderBuilder + + // WithSynchronousExports allows configuring whether the exporters should export in synchronous mode + // (which must be used ONLY for testing) or (by default) the batching mode. + WithSynchronousExports(sync bool) TracerProviderBuilder + + WithLogging(log bool) TracerProviderBuilder + + // Build builds the SDKTracerProvider. + Build() (SDKTracerProvider, error) + + // InstallGlobally builds the TracerProvider and registers it globally using otel.SetTracerProvider(tp). + InstallGlobally() error +} + +type builder struct { + exporters []tracesdk.SpanExporter + errs []error + tpOpts []tracesdk.TracerProviderOption + attrs []attribute.KeyValue + sync bool + log bool +} + +func (b *builder) RegisterInsecureOTelExporter(ctx context.Context, addr string, opts ...otlptracegrpc.Option) TracerProviderBuilder { + if len(addr) == 0 { + addr = "localhost:55680" + } + + defaultOpts := []otlptracegrpc.Option{ + otlptracegrpc.WithEndpoint(addr), + otlptracegrpc.WithInsecure(), + } + // Make sure to order the defaultOpts first, so opts can override the default ones + opts = append(defaultOpts, opts...) + // Run the main constructor for the otlptracegrpc exporter + exp, err := otlptracegrpc.New(ctx, opts...) + b.exporters = append(b.exporters, exp) + b.errs = append(b.errs, err) + return b +} + +func (b *builder) RegisterInsecureJaegerExporter(addr string, opts ...jaeger.CollectorEndpointOption) TracerProviderBuilder { + defaultOpts := []jaeger.CollectorEndpointOption{} + // Only override if addr is set. Default is "http://localhost:14268/api/traces" + if len(addr) != 0 { + defaultOpts = append(defaultOpts, jaeger.WithEndpoint(addr)) + } + // Make sure to order the defaultOpts first, so opts can override the default ones + opts = append(defaultOpts, opts...) 
+ // Run the main constructor for the jaeger exporter + exp, err := jaeger.New(jaeger.WithCollectorEndpoint(opts...)) + b.exporters = append(b.exporters, exp) + b.errs = append(b.errs, err) + return b +} + +func (b *builder) RegisterStdoutExporter(opts ...stdouttrace.Option) TracerProviderBuilder { + defaultOpts := []stdouttrace.Option{ + stdouttrace.WithPrettyPrint(), + } + // Make sure to order the defaultOpts first, so opts can override the default ones + opts = append(defaultOpts, opts...) + // Run the main constructor for the stdout exporter + exp, err := stdouttrace.New(opts...) + b.exporters = append(b.exporters, exp) + b.errs = append(b.errs, err) + return b +} + +func (b *builder) WithOptions(opts ...tracesdk.TracerProviderOption) TracerProviderBuilder { + b.tpOpts = append(b.tpOpts, opts...) + return b +} + +func (b *builder) WithAttributes(attrs ...attribute.KeyValue) TracerProviderBuilder { + b.attrs = append(b.attrs, attrs...) + return b +} + +func (b *builder) WithSynchronousExports(sync bool) TracerProviderBuilder { + b.sync = sync + return b +} + +func (b *builder) WithLogging(log bool) TracerProviderBuilder { + b.log = log + return b +} + +var ErrNoExportersProvided = errors.New("no exporters provided") + +func (b *builder) Build() (SDKTracerProvider, error) { + // Combine and filter the errors from the exporter building + if err := multierr.Combine(b.errs...); err != nil { + return nil, err + } + if len(b.exporters) == 0 { + return nil, ErrNoExportersProvided + } + // TODO: Require at least one exporter + + // By default, set the service name to "libgitops". + // This can be overridden through WithAttributes + defaultAttrs := []attribute.KeyValue{ + semconv.ServiceNameKey.String("libgitops"), + } + // Make sure to order the defaultAttrs first, so b.attrs can override the default ones + attrs := append(defaultAttrs, b.attrs...) 
+ + // By default, register a resource with the given attributes + defaultTpOpts := []tracesdk.TracerProviderOption{ + // Record information about this application in an Resource. + tracesdk.WithResource(resource.NewWithAttributes(semconv.SchemaURL, attrs...)), + } + + // Register all exporters with the options list + for _, exporter := range b.exporters { + // The non-syncing mode shall only be used in testing. The batching mode must be used in production. + if b.sync { + defaultTpOpts = append(defaultTpOpts, tracesdk.WithSyncer(exporter)) + } else { + defaultTpOpts = append(defaultTpOpts, tracesdk.WithBatcher(exporter)) + } + } + + // Make sure to order the defaultTpOpts first, so b.tpOpts can override the default ones + opts := append(defaultTpOpts, b.tpOpts...) + // Build the tracing provider + tpsdk := tracesdk.NewTracerProvider(opts...) + if b.log { + return NewLoggingTracerProvider(tpsdk), nil + } + return tpsdk, nil +} + +func (b *builder) InstallGlobally() error { + // First, build the tracing provider... + tp, err := b.Build() + if err != nil { + return err + } + // ... and register it globally + otel.SetTracerProvider(tp) + return nil +} + +// Shutdown tries to convert the trace.TracerProvider to a SDKTracerProvider to +// access its Shutdown method to make sure all traces have been flushed using the exporters +// before it's shutdown. If timeout == 0, the shutdown will be done without a grace period. +// If timeout > 0, the shutdown will have a grace period of that period of time to shutdown. +func Shutdown(ctx context.Context, tp trace.TracerProvider, timeout time.Duration) error { + return callSDKProvider(ctx, tp, timeout, func(ctx context.Context, sp SDKTracerProvider) error { + return sp.Shutdown(ctx) + }) +} + +// ForceFlush tries to convert the trace.TracerProvider to a SDKTracerProvider to +// access its ForceFlush method to make sure all traces have been flushed using the exporters. 
+// If timeout == 0, the flushing will be done without a grace period. +// If timeout > 0, the flushing will have a grace period of that period of time. +// Unlike Shutdown, which also flushes the traces, the provider is still operation after this. +func ForceFlush(ctx context.Context, tp trace.TracerProvider, timeout time.Duration) error { + return callSDKProvider(ctx, tp, timeout, func(ctx context.Context, sp SDKTracerProvider) error { + return sp.ForceFlush(ctx) + }) +} + +func callSDKProvider(ctx context.Context, tp trace.TracerProvider, timeout time.Duration, fn func(context.Context, SDKTracerProvider) error) error { + p, ok := tp.(SDKTracerProvider) + if !ok { + return nil + } + + if timeout != 0 { + // Do not make the application hang when it is shutdown. + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + + return fn(ctx, p) +} + +// ShutdownGlobal shuts down the global TracerProvider using Shutdown() +func ShutdownGlobal(ctx context.Context, timeout time.Duration) error { + return Shutdown(ctx, otel.GetTracerProvider(), timeout) +} + +// ForceFlushGlobal flushes the global TracerProvider using ForceFlush() +func ForceFlushGlobal(ctx context.Context, timeout time.Duration) error { + return ForceFlush(ctx, otel.GetTracerProvider(), timeout) +} diff --git a/pkg/tracing/tracing.go b/pkg/tracing/tracing.go new file mode 100644 index 00000000..e8dbc12e --- /dev/null +++ b/pkg/tracing/tracing.go @@ -0,0 +1,244 @@ +package tracing + +import ( + "context" + "errors" + "fmt" + "io" + "os" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/trace" + "go.uber.org/multierr" + "k8s.io/utils/pointer" +) + +// TODO: Make a SpanProcessor that can output relevant YAML based on what's happening, for +// unit testing. + +// FuncTracer is a higher-level type than the core trace.Tracer, which allows instrumenting +// a function running in a closure. 
It'll automatically create a span with the given name +// (plus maybe a pre-configured prefix). TraceFunc also returns a TraceFuncResult which allows +// the error to be instrumented automatically as well. +type FuncTracer interface { + trace.Tracer + // TraceFunc creates a trace with the given name while fn is executing. + // ErrFuncNotSupplied is returned if fn is nil. + TraceFunc(ctx context.Context, spanName string, fn TraceFunc, opts ...trace.SpanStartOption) TraceFuncResult +} + +// FuncTracerFromGlobal returns a new FuncTracer with the given name that uses the globally-registered +// tracing provider. +func FuncTracerFromGlobal(name string) FuncTracer { + return TracerOptions{Name: name, UseGlobal: pointer.Bool(true)} +} + +// BackgroundTracingContext +func BackgroundTracingContext() context.Context { + ctx := context.Background() + noopSpan := trace.SpanFromContext(ctx) + return trace.ContextWithSpan(ctx, &tracerProviderSpan{noopSpan, true}) +} + +type tracerProviderSpan struct { + trace.Span + useGlobal bool +} + +// Override the TracerProvider call if useGlobal is set +func (s *tracerProviderSpan) TracerProvider() trace.TracerProvider { + if s.useGlobal { + return otel.GetTracerProvider() + } + return s.Span.TracerProvider() +} + +type TracerNamed interface { + TracerName() string +} + +// +func FromContext(ctx context.Context, obj interface{}) FuncTracer { + name := "" + // TODO: Use a switch clause + tr, isTracerNamed := obj.(TracerNamed) + str, isString := obj.(string) + if isTracerNamed { + name = tr.TracerName() + } else if isString { + name = str + } else if obj != nil { + name = fmt.Sprintf("%T", obj) + } + + switch obj { + case os.Stdin: + name = "os.Stdin" + case os.Stdout: + name = "os.Stdout" + case os.Stderr: + name = "os.Stderr" + case io.Discard: + name = "io.Discard" + } + + return TracerOptions{Name: name, provider: trace.SpanFromContext(ctx).TracerProvider()} +} + +func FromContextUnnamed(ctx context.Context) FuncTracer { + return 
FromContext(ctx, "") +} + +// TraceFuncResult can either just simply return the error from TraceFunc, or register the error using +// DefaultErrRegisterFunc (and then return it), or register the error using a custom error handling function. +type TraceFuncResult interface { + // Error returns the error without any registration of it to the span. + Error() error + // Register registers the error using DefaultErrRegisterFunc. + Register() error + // RegisterCustom registers the error with the span using fn. + // ErrFuncNotSupplied is returned if fn is nil. + RegisterCustom(fn ErrRegisterFunc) error +} + +// ErrFuncNotSupplied is raised when a supplied function callback is nil. +var ErrFuncNotSupplied = errors.New("function argument not supplied") + +// MakeFuncNotSuppliedError formats ErrFuncNotSupplied in a standard way. +func MakeFuncNotSuppliedError(name string) error { + return fmt.Errorf("%w: %s", ErrFuncNotSupplied, name) +} + +// TraceFunc represents an instrumented function closure. +type TraceFunc func(context.Context, trace.Span) error + +// ErrRegisterFunc should register the return error of TraceFunc err with the span +type ErrRegisterFunc func(span trace.Span, err error) + +// TracerOptions implements TracerOption, trace.Tracer and FuncTracer. +//var _ TracerOption = TracerOptions{} +var _ trace.Tracer = TracerOptions{} +var _ FuncTracer = TracerOptions{} + +// TracerOptions contains options for creating a trace.Tracer and FuncTracer. +type TracerOptions struct { + // Name, if set to a non-empty value, will serve as the prefix for spans generated + // using the FuncTracer as "{o.Name}.{spanName}" (otherwise just "{spanName}"), and + // as the name of the trace.Tracer. + Name string + // UseGlobal specifies to default to the global tracing provider if true + // (or, just use a no-op TracerProvider, if false). This only applies if neither + // WithTracer or WithTracerProvider have been supplied. 
+ UseGlobal *bool + // provider is what TracerProvider to use for creating a tracer. If nil, + // trace.NewNoopTracerProvider() is used. + provider trace.TracerProvider + // tracer can be set to use a specific tracer in Start(). If nil, a + // tracer is created using the provider. + tracer trace.Tracer +} + +func (o TracerOptions) ApplyToTracer(target *TracerOptions) { + if len(o.Name) != 0 { + target.Name = o.Name + } + if o.UseGlobal != nil { + target.UseGlobal = o.UseGlobal + } + if o.provider != nil { + target.provider = o.provider + } + if o.tracer != nil { + target.tracer = o.tracer + } +} + +// SpanName appends the name of the given function (spanName) to the given +// o.Name, if set. The return value of this function is aimed to be +// the name of the span, which will then be of the form "{o.Name}.{spanName}", +// or just "{spanName}". +func (o TracerOptions) fmtSpanName(spanName string) string { + // TODO: Does this match the other logic in FromContext? + if len(o.Name) != 0 && len(spanName) != 0 { + return o.Name + "." + spanName + } + // As either (or both) o.Name and spanName are empty strings, we can add them together + name := o.Name + spanName + if len(name) != 0 { + return name + } + return "unnamed_span" +} + +func (o TracerOptions) tracerProvider() trace.TracerProvider { + if o.provider != nil { + return o.provider + } else if o.UseGlobal != nil && *o.UseGlobal { + return otel.GetTracerProvider() + } else { + return trace.NewNoopTracerProvider() + } +} + +func (o TracerOptions) getTracer() trace.Tracer { + if o.tracer == nil { + o.tracer = o.tracerProvider().Tracer(o.Name) + } + return o.tracer +} + +func (o TracerOptions) Start(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + return o.getTracer().Start(ctx, o.fmtSpanName(spanName), opts...) 
+} + +func (o TracerOptions) TraceFunc(ctx context.Context, spanName string, fn TraceFunc, opts ...trace.SpanStartOption) TraceFuncResult { + ctx, span := o.Start(ctx, spanName, opts...) + // Close the span first in the returned TraceFuncResult, to be able to register the error before + // the span stops recording + + // Catch if fn == nil + if fn == nil { + return &traceFuncResult{MakeFuncNotSuppliedError("FuncTracer.TraceFunc"), span} + } + + return &traceFuncResult{fn(ctx, span), span} +} + +// IMPORTANT TO DOCUMENT: Always call one of the given functions, otherwise the span won't be +// closed +type traceFuncResult struct { + err error + span trace.Span +} + +func (r *traceFuncResult) Error() error { + // Important: Remember to end the span + r.span.End() + return r.err +} + +func (r *traceFuncResult) Register() error { + return r.RegisterCustom(DefaultErrRegisterFunc) +} + +func (r *traceFuncResult) RegisterCustom(fn ErrRegisterFunc) error { + if fn == nil { + err := multierr.Combine(r.err, MakeFuncNotSuppliedError("TraceFuncResult.RegisterCustom")) + DefaultErrRegisterFunc(r.span, err) + return err + } + + // Register the error with the span, and potentially process it. + fn(r.span, r.err) + // Important: Remember to end the span + r.span.End() + return r.err +} + +// DefaultErrRegisterFunc registers the error with the span using span.RecordError(err) +// if the error is non-nil, and then returns the error unchanged. 
+func DefaultErrRegisterFunc(span trace.Span, err error) { + if err != nil { + span.RecordError(err) + } +} diff --git a/pkg/tracing/tracing_test.go b/pkg/tracing/tracing_test.go new file mode 100644 index 00000000..b03ca396 --- /dev/null +++ b/pkg/tracing/tracing_test.go @@ -0,0 +1,65 @@ +package tracing + +/*func TestTracerOptions_getTracer(t *testing.T) { + tests := []struct { + name string + global trace.TracerProvider + opts []TracerOption + want trace.Tracer + }{ + { + name: "empty", + opts: []TracerOption{TracerOptions{}}, + want: trace.NewNoopTracerProvider().Tracer(""), + }, + { + name: "with name", + opts: []TracerOption{TracerOptions{Name: "foo"}}, + want: trace.NewNoopTracerProvider().Tracer("foo"), + }, + { + name: "use global", + global: customTp{}, + opts: []TracerOption{TracerOptions{Name: "foo", UseGlobal: pointer.BoolPtr(true)}}, + want: trace.NewNoopTracerProvider().Tracer("custom-foo"), + }, + { + name: "use global", + global: customTp{}, + opts: []TracerOption{TracerOptions{Name: "foo", UseGlobal: pointer.BoolPtr(true)}}, + want: trace.NewNoopTracerProvider().Tracer("custom-foo"), + }, + { + name: "use custom tp", + opts: []TracerOption{TracerOptions{Name: "foo"}, WithTracerProvider(customTp{})}, + want: trace.NewNoopTracerProvider().Tracer("custom-foo"), + }, + { + name: "use custom tracer", + opts: []TracerOption{TracerOptions{Name: "foo"}, WithTracer(customTp{}.Tracer("custom-bar"))}, + want: customTp{}.Tracer("custom-bar"), + }, + } + for _, tt := range tests { + earlierTp := otel.GetTracerProvider() + if tt.global != nil { + otel.SetTracerProvider(tt.global) + } + o := TracerOptions{} + for _, opt := range tt.opts { + opt.ApplyToTracer(&o) + } + got := o.getTracer() + assert.Equal(t, tt.want, got) + if tt.global != nil { + otel.SetTracerProvider(earlierTp) + } + } +} + +type customTp struct{} + +func (customTp) Tracer(instrumentationName string, opts ...trace.TracerOption) trace.Tracer { + return 
trace.NewNoopTracerProvider().Tracer("custom-" + instrumentationName) +} +*/ diff --git a/pkg/util/compositeio/compositeio.go b/pkg/util/compositeio/compositeio.go new file mode 100644 index 00000000..dcbca21f --- /dev/null +++ b/pkg/util/compositeio/compositeio.go @@ -0,0 +1,38 @@ +package compositeio + +import ( + "fmt" + "io" + + "github.com/weaveworks/libgitops/pkg/tracing" +) + +func ReadCloser(r io.Reader, c io.Closer) io.ReadCloser { + return readCloser{r, c} +} + +type readCloser struct { + io.Reader + io.Closer +} + +func (rc readCloser) TracerName() string { + return fmt.Sprintf("compositeio.readCloser{%T, %T}", rc.Reader, rc.Closer) +} + +var _ tracing.TracerNamed = readCloser{} + +func WriteCloser(w io.Writer, c io.Closer) io.WriteCloser { + return writeCloser{w, c} +} + +type writeCloser struct { + io.Writer + io.Closer +} + +func (wc writeCloser) TracerName() string { + return fmt.Sprintf("compositeio.writeCloser{%T, %T}", wc.Writer, wc.Closer) +} + +var _ tracing.TracerNamed = writeCloser{} diff --git a/pkg/util/limitedio/limitedio.go b/pkg/util/limitedio/limitedio.go new file mode 100644 index 00000000..745c955c --- /dev/null +++ b/pkg/util/limitedio/limitedio.go @@ -0,0 +1,178 @@ +package limitedio + +import ( + "bytes" + "errors" + "fmt" + "io" + "strconv" + + "github.com/weaveworks/libgitops/pkg/util/structerr" +) + +// DefaultMaxReadSize is 3 MB, which matches the default behavior of Kubernetes. +// (The API server only accepts request bodies of 3MB by default.) 
+const DefaultMaxReadSize Limit = 3 * 1024 * 1024 +const Infinite Limit = -1 + +type Limit int64 + +func (l Limit) String() string { + if l <= 0 { + return "infinite" + } + return strconv.FormatInt(int64(l), 10) +} +func (l Limit) Int64() int64 { return int64(l) } +func (l Limit) Int() (int, error) { + i := int(l) + if int64(i) != int64(l) { + return 0, errors.New("the limit overflows int") + } + return i, nil +} + +func (l Limit) IsLessThan(len int64) bool { + // l <= 0 means "l is infinite" => limit is larger than len => not less than len + if l <= 0 { + return false + } + return l.Int64() < len +} + +func (l Limit) IsLessThanOrEqual(len int64) bool { + // l <= 0 means "l is infinite" => limit is larger than len => not less than len + if l <= 0 { + return false + } + return l.Int64() <= len +} + +// ErrReadSizeOverflow returns a new *ReadSizeOverflowError +func ErrReadSizeOverflow(maxReadSize Limit) *ReadSizeOverflowError { + return &ReadSizeOverflowError{MaxReadSize: maxReadSize} +} + +// Enforce all struct errors implementing structerr.StructError +var _ structerr.StructError = &ReadSizeOverflowError{} + +// ReadSizeOverflowError describes that a read or write has grown larger than +// allowed. It is up to the implementer to describe what a "frame" in this +// context is. This error is e.g. returned from the NewReader implementation. +// If MaxReadSize is non-zero, it is included in the error text. 
+//
+// This error can be checked for equality using errors.Is(err, &ReadSizeOverflowError{})
+type ReadSizeOverflowError struct {
+	// +optional
+	MaxReadSize Limit
+}
+
+func (e *ReadSizeOverflowError) Error() string {
+	msg := "frame was larger than maximum allowed size"
+	if e.MaxReadSize != 0 {
+		msg = fmt.Sprintf("%s %d bytes", msg, e.MaxReadSize)
+	}
+	return msg
+}
+
+func (e *ReadSizeOverflowError) Is(target error) bool {
+	_, ok := target.(*ReadSizeOverflowError)
+	return ok
+}
+
+// Reader is a specialized io.Reader helper type, which allows detecting when
+// a read grows larger than the allowed maxReadSize, returning an ErrReadSizeOverflow in that case.
+//
+// Internally there's a byte counter registering how many bytes have been read using the io.Reader
+// across all Read calls since the last ResetCounter reset, which resets the byte counter to 0. This
+// means that if you have successfully read one frame within bounds of maxReadSize, and want to
+// re-use the underlying io.Reader for the next frame, you shall run ResetCounter to start again.
+//
+// maxReadSize is specified when constructing a Reader, and defaults to DefaultMaxReadSize
+// if left as the empty value 0.
+// If maxReadSize is negative, the reader transparently forwards all calls without any restrictions.
+//
+// Note: The Reader implementation is not thread-safe, that is for higher-level interfaces
+// to implement and ensure.
+type Reader interface {
+	// The byte count returned across consecutive Read(p) calls are at maximum maxReadSize, until reset
+	// by ResetCounter.
+	io.Reader
+	// ResetCounter resets the byte counter counting how many bytes have been read using Read(p)
+	ResetCounter()
+}
+
+// NewReader makes a new Reader implementation.
+func NewReader(r io.Reader, maxReadSize Limit) Reader {
+	// Default maxReadSize if unset. 
+ if maxReadSize == 0 { + maxReadSize = DefaultMaxReadSize + } + + return &ioLimitedReader{ + reader: r, + buf: new(bytes.Buffer), + maxReadSize: maxReadSize, + } +} + +type ioLimitedReader struct { + reader io.Reader + buf *bytes.Buffer + maxReadSize Limit + byteCounter int64 +} + +func (l *ioLimitedReader) Read(b []byte) (int, error) { + // If l.maxReadSize is negative, put no restrictions on the read + maxReadSize := l.maxReadSize.Int64() + if maxReadSize < 0 { + return l.reader.Read(b) + } + // If we've already read more than we're allowed to, return an overflow error + if l.byteCounter > maxReadSize { + // Keep returning this error as long as relevant + return 0, ErrReadSizeOverflow(l.maxReadSize) + + } else if l.byteCounter == maxReadSize { + // At this point we're not sure if the frame actually stops here or not + // To figure that out; read one more byte into tmp + tmp := make([]byte, 1) + tmpn, err := l.reader.Read(tmp) + + // Write the read byte into the persistent buffer, for later use when l.byteCounter < l.maxReadSize + _, _ = l.buf.Write(tmp[:tmpn]) + // Increase the byteCounter, as bytes written to buf counts as "read" + l.byteCounter += int64(tmpn) + + // If no bytes were read; it's ok as we didn't exceed the limit. Return + // the error; often nil or io.EOF in this case. + if tmpn == 0 { + return 0, err + } + // Return that the frame overflowed now, as were able to read the byte (tmpn must be 1) + return 0, ErrReadSizeOverflow(l.maxReadSize) + } // else l.byteCounter < l.maxReadSize + + // We can at maximum read bytesLeft bytes more, shrink b accordingly if b is larger than the + // maximum allowed amount to read. + bytesLeft := maxReadSize - l.byteCounter + if int64(len(b)) > bytesLeft { + b = b[:bytesLeft] + } + + // First, flush any bytes in the buffer. By convention, the writes to buf have already + // increased byteCounter, so no need to do that now. 
No need to check the error as buf
+	// only returns io.EOF, and that's not important, it's even expected in most cases.
+	m, _ := l.buf.Read(b)
+	// Move the b slice forward m bytes as the m first bytes of b have now been populated
+	b = b[m:]
+
+	// Read from the reader into the rest of b
+	n, err := l.reader.Read(b)
+	// Register how many bytes have been read now additionally
+	l.byteCounter += int64(n)
+	return n, err
+}
+
+func (r *ioLimitedReader) ResetCounter() { r.byteCounter = 0 }
diff --git a/pkg/util/structerr/structerr.go b/pkg/util/structerr/structerr.go
new file mode 100644
index 00000000..e135b640
--- /dev/null
+++ b/pkg/util/structerr/structerr.go
@@ -0,0 +1,13 @@
+package structerr
+
+// StructError is an interface for errors that are structs, and can be compared for
+// errors.Is equality. Equality is determined by type equality, i.e. if the pointer
+// receiver is *MyError and target can be successfully cast using target.(*MyError),
+// then target and the pointer receiver error are equal, otherwise not.
+//
+// This is needed because errors.Is does not support equality like this for structs
+// by default. 
+type StructError interface { + error + Is(target error) bool +} From 6032359743ec8f71f7edfb0eb0746f2dc0355a47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 27 Jul 2021 14:50:54 +0300 Subject: [PATCH 142/149] Update the serializer package to use the new framing library --- pkg/serializer/comments.go | 24 +- pkg/serializer/comments/LICENSE | 201 ------------ pkg/serializer/comments/comments.go | 117 ------- pkg/serializer/comments/comments_test.go | 369 ----------------------- pkg/serializer/comments/lost.go | 118 -------- pkg/serializer/comments_test.go | 12 +- pkg/serializer/decode.go | 34 +-- pkg/serializer/encode.go | 32 +- pkg/serializer/error_structs.go | 52 ---- pkg/serializer/frame_reader.go | 204 ------------- pkg/serializer/frame_reader_test.go | 116 ------- pkg/serializer/frame_single.go | 84 ------ pkg/serializer/frame_utils.go | 37 --- pkg/serializer/frame_writer.go | 128 -------- pkg/serializer/frame_writer_test.go | 66 ---- pkg/serializer/patch.go | 6 +- pkg/serializer/serializer.go | 19 +- pkg/serializer/serializer_test.go | 138 +++++---- 18 files changed, 145 insertions(+), 1612 deletions(-) delete mode 100644 pkg/serializer/comments/LICENSE delete mode 100644 pkg/serializer/comments/comments.go delete mode 100644 pkg/serializer/comments/comments_test.go delete mode 100644 pkg/serializer/comments/lost.go delete mode 100644 pkg/serializer/error_structs.go delete mode 100644 pkg/serializer/frame_reader.go delete mode 100644 pkg/serializer/frame_reader_test.go delete mode 100644 pkg/serializer/frame_single.go delete mode 100644 pkg/serializer/frame_utils.go delete mode 100644 pkg/serializer/frame_writer.go delete mode 100644 pkg/serializer/frame_writer_test.go diff --git a/pkg/serializer/comments.go b/pkg/serializer/comments.go index a0169392..7ac6461c 100644 --- a/pkg/serializer/comments.go +++ b/pkg/serializer/comments.go @@ -2,12 +2,15 @@ package serializer import ( "bytes" + "context" "encoding/base64" "errors" 
"fmt" "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/serializer/comments" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" + "github.com/weaveworks/libgitops/pkg/frame/sanitize/comments" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/kustomize/kyaml/yaml" @@ -24,10 +27,10 @@ var ( // tryToPreserveComments tries to save the original file data (base64-encoded) into an annotation. // This original file data can be used at encoding-time to preserve comments -func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct ContentType) { +func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct content.ContentType) { // If the user opted into preserving comments and the format is YAML, proceed // If they didn't, return directly - if !(d.opts.PreserveComments == PreserveCommentsStrict && ct == ContentTypeYAML) { + if !(d.opts.PreserveComments == PreserveCommentsStrict && ct == content.ContentTypeYAML) { return } @@ -39,7 +42,8 @@ func (d *decoder) tryToPreserveComments(doc []byte, obj runtime.Object, ct Conte } // tryToPreserveComments tries to locate the possibly-saved original file data in the object's annotation -func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw FrameWriter, obj runtime.Object, metaObj metav1.Object) error { +func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw frame.Writer, obj runtime.Object, metaObj metav1.Object) error { + ctx := context.TODO() // If the user did not opt into preserving comments, just sanitize ObjectMeta temporarily and and return if e.opts.PreserveComments == PreserveCommentsDisable { // Normal encoding without the annotation (so it doesn't leak by accident) @@ -47,7 +51,7 @@ func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw Fr } // The user requested to preserve comments, but content type is not YAML, so log, 
sanitize and return - if fw.ContentType() != ContentTypeYAML { + if fw.ContentType() != content.ContentTypeYAML { logrus.Debugf("Asked to preserve comments, but ContentType is not YAML, so ignoring") // Normal encoding without the annotation (so it doesn't leak by accident) @@ -64,7 +68,7 @@ func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw Fr // Encode the new object into a temporary buffer, it should not be written as the "final result" to the FrameWriter buf := new(bytes.Buffer) - if err := noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, NewYAMLFrameWriter(buf), obj)); err != nil { + if err := noAnnotationWrapper(metaObj, e.normalEncodeFunc(versionEncoder, frame.ToYAMLBuffer(buf), obj)); err != nil { // fatal error return err } @@ -78,20 +82,22 @@ func (e *encoder) encodeWithCommentSupport(versionEncoder runtime.Encoder, fw Fr } // Copy over comments from the old to the new schema + // TODO: Move over to use the frame Sanitizer flow if err := comments.CopyComments(priorNode, afterNode, true); err != nil { // fatal error return err } // Print the new schema with the old comments kept to the FrameWriter - _, err = fmt.Fprint(fw, afterNode.MustString()) + _, err = fmt.Fprint(frame.ToIoWriteCloser(ctx, fw), afterNode.MustString()) // we're done, exit the encode function return err } -func (e *encoder) normalEncodeFunc(versionEncoder runtime.Encoder, fw FrameWriter, obj runtime.Object) func() error { +func (e *encoder) normalEncodeFunc(versionEncoder runtime.Encoder, fw frame.Writer, obj runtime.Object) func() error { return func() error { - return versionEncoder.Encode(obj, fw) + ctx := context.TODO() + return versionEncoder.Encode(obj, frame.ToIoWriteCloser(ctx, fw)) } } diff --git a/pkg/serializer/comments/LICENSE b/pkg/serializer/comments/LICENSE deleted file mode 100644 index 9c8f3ea0..00000000 --- a/pkg/serializer/comments/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - 
http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. \ No newline at end of file diff --git a/pkg/serializer/comments/comments.go b/pkg/serializer/comments/comments.go deleted file mode 100644 index 140542c6..00000000 --- a/pkg/serializer/comments/comments.go +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// This package provides a means to copy over comments between -// two kyaml/yaml.RNode trees. This code is derived from -// the sigs.k8s.io/kustomize/kyaml/comments package, at revision -// 600d4f2c0bf174abd76d03e49939ee0c34b2a019. -// -// It has been slightly modified and adapted to not lose any -// comment from the old tree, although the node the comment is -// attached to doesn't exist in the new tree. To solve this, -// this package moves any such comments to the beginning of the -// file. 
-// This file is a temporary means as long as we're waiting for -// these code changes to get upstreamed to its origin, the kustomize repo. -// https://pkg.go.dev/sigs.k8s.io/kustomize/kyaml/comments?tab=doc#CopyComments - -package comments - -import ( - "sigs.k8s.io/kustomize/kyaml/openapi" - "sigs.k8s.io/kustomize/kyaml/yaml" - "sigs.k8s.io/kustomize/kyaml/yaml/walk" -) - -// CopyComments recursively copies the comments on fields in from to fields in to -func CopyComments(from, to *yaml.RNode, moveCommentsTop bool) error { - // create the copier struct for the specified mode - c := &copier{moveCommentsTop, nil, make(map[int]trackedKey)} - - // copy over comments for the root tree(s) - c.copyFieldComments(from, to) - - // walk the fields copying comments - _, err := walk.Walker{ - Sources: []*yaml.RNode{from, to}, - Visitor: c, - VisitKeysAsScalars: true}.Walk() - - // restore lost comments to the top of the document, if applicable - if moveCommentsTop { - c.restoreLostComments(to) - } - - return err -} - -// copier implements walk.Visitor, and copies comments to fields shared between 2 instances -// of a resource -type copier struct { - // moveCommentsTop specifies whether to recover lost comments or not - moveCommentsTop bool - // if moveCommentsTop is true, this slice will be populated with lost comment entries while iterating - lostComments []lostComment - // if moveCommentsTop is true, this map will be populated with tracked YAML keys for lines while iterating - trackedKeys map[int]trackedKey -} - -func (c *copier) VisitMap(s walk.Sources, _ *openapi.ResourceSchema) (*yaml.RNode, error) { - c.copyFieldComments(s.Dest(), s.Origin()) - return s.Dest(), nil -} - -func (c *copier) VisitScalar(s walk.Sources, _ *openapi.ResourceSchema) (*yaml.RNode, error) { - to := s.Origin() - // TODO: File a bug with upstream yaml to handle comments for FoldedStyle scalar nodes - // Hack: convert FoldedStyle scalar node to DoubleQuotedStyle as the line comments are - // being 
serialized without space - // https://github.com/GoogleContainerTools/kpt/issues/766 - if to != nil && to.Document().Style == yaml.FoldedStyle { - to.Document().Style = yaml.DoubleQuotedStyle - } - - c.copyFieldComments(s.Dest(), to) - return s.Dest(), nil -} - -func (c *copier) VisitList(s walk.Sources, _ *openapi.ResourceSchema, _ walk.ListKind) (*yaml.RNode, error) { - c.copyFieldComments(s.Dest(), s.Origin()) - destItems := s.Dest().Content() - originItems := s.Origin().Content() - - for i := 0; i < len(destItems) && i < len(originItems); i++ { - dest := destItems[i] - origin := originItems[i] - - if dest.Value == origin.Value { - c.copyFieldComments(yaml.NewRNode(dest), yaml.NewRNode(origin)) - } - } - - return s.Dest(), nil -} - -// copyFieldComments copies the comment from one field to another -func (c *copier) copyFieldComments(from, to *yaml.RNode) { - // If either from or to doesn't exist, return quickly - if from == nil || to == nil { - - // If we asked for moving lost comments (i.e. if from is non-nil and to is nil), - // do it through the moveLostCommentToTop function - if c.moveCommentsTop && from != nil && to == nil { - c.rememberLostComments(from) - } - return - } - - if to.Document().LineComment == "" { - to.Document().LineComment = from.Document().LineComment - } - if to.Document().HeadComment == "" { - to.Document().HeadComment = from.Document().HeadComment - } - if to.Document().FootComment == "" { - to.Document().FootComment = from.Document().FootComment - } -} diff --git a/pkg/serializer/comments/comments_test.go b/pkg/serializer/comments/comments_test.go deleted file mode 100644 index 233feeec..00000000 --- a/pkg/serializer/comments/comments_test.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2019 The Kubernetes Authors. -// SPDX-License-Identifier: Apache-2.0 - -// This package provides a means to copy over comments between -// two kyaml/yaml.RNode trees. 
This code is derived from -// the sigs.k8s.io/kustomize/kyaml/comments package, at revision -// 600d4f2c0bf174abd76d03e49939ee0c34b2a019. -// -// It has been slightly modified and adapted to not lose any -// comment from the old tree, although the node the comment is -// attached to doesn't exist in the new tree. To solve this, -// this package moves any such comments to the beginning of the -// file. -// This file is a temporary means as long as we're waiting for -// these code changes to get upstreamed to its origin, the kustomize repo. -// https://pkg.go.dev/sigs.k8s.io/kustomize/kyaml/comments?tab=doc#CopyComments - -package comments - -import ( - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -func TestCopyComments(t *testing.T) { - testCases := []struct { - name string - from string - to string - expected string - }{ - { - name: "copy_comments", - from: ` -# A -# -# B - -# C -apiVersion: apps/v1 -kind: Deployment -spec: # comment 1 - # comment 2 - replicas: 3 # comment 3 - # comment 4 -`, - to: ` -apiVersion: apps/v1 -kind: Deployment -spec: - replicas: 4 -`, - expected: ` -# A -# -# B - -# C -apiVersion: apps/v1 -kind: Deployment -spec: # comment 1 - # comment 2 - replicas: 4 # comment 3 - # comment 4 -`, - }, { - name: "associative_list", - from: ` -apiVersion: apps/v1 -kind: Deployment -spec: - template: - spec: - containers: - - name: foo - image: bar # comment 1 -`, - to: ` -apiVersion: apps/v1 -kind: Deployment -spec: - template: - spec: - containers: - - name: foo - image: bar -`, - expected: ` -apiVersion: apps/v1 -kind: Deployment -spec: - template: - spec: - containers: - - name: foo - image: bar # comment 1 -`, - }, { - name: "keep_comments", - from: ` -# A -# -# B - -# C -apiVersion: apps/v1 -kind: Deployment -spec: # comment 1 - # comment 2 - replicas: 3 # comment 3 - # comment 4 -`, - to: ` -apiVersion: apps/v1 -kind: Deployment -spec: - replicas: 4 # comment 5 -`, - expected: ` -# A -# 
-# B - -# C -apiVersion: apps/v1 -kind: Deployment -spec: # comment 1 - # comment 2 - replicas: 4 # comment 5 - # comment 4 -`, - }, { - name: "copy_item_comments", - from: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a # comment -`, - to: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a -`, - expected: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a # comment -`, - }, { - name: "copy_item_comments_2", - from: ` -apiVersion: apps/v1 -kind: Deployment -items: -# comment -- a -`, - to: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a -`, - expected: ` -apiVersion: apps/v1 -kind: Deployment -items: -# comment -- a -`, - }, { - name: "copy_item_comments_middle", - from: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a -- b # comment -- c -`, - to: ` -apiVersion: apps/v1 -kind: Deployment -items: -- d -- b -- e -`, - expected: ` -apiVersion: apps/v1 -kind: Deployment -items: -- d -- b # comment -- e -`, - }, { - name: "copy_item_comments_moved", - from: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a -- b # comment -- c -`, - to: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a -- c -- b -`, - expected: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a -- c -- b -`, - }, { - name: "copy_item_comments_no_match", - from: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a # comment -`, - to: ` -apiVersion: apps/v1 -kind: Deployment -items: -- b -`, - expected: ` -apiVersion: apps/v1 -kind: Deployment -items: -- b -`, - }, { - name: "copy_item_comments_add", - from: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a # comment -`, - to: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a -- b -`, - expected: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a # comment -- b -`, - }, { - name: "copy_item_comments_remove", - from: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a # comment -- b -`, - to: ` -apiVersion: apps/v1 -kind: Deployment -items: -- a -`, - expected: ` -apiVersion: apps/v1 -kind: Deployment -items: 
-- a # comment -`, - }, { - name: "copy_comments_folded_style", - from: ` -apiVersion: v1 -kind: ConfigMap -data: - somekey: "012345678901234567890123456789012345678901234567890123456789012345678901234" # x -`, - to: ` -apiVersion: v1 -kind: ConfigMap -data: - somekey: >- - 012345678901234567890123456789012345678901234567890123456789012345678901234 -`, - expected: ` -apiVersion: v1 -kind: ConfigMap -data: - somekey: "012345678901234567890123456789012345678901234567890123456789012345678901234" # x -`, - }, { - name: "copy_comments_move_to_top", - from: ` -# Top comment - -apiVersion: v1 -kind: ConfigMap # Foo -# Bar -data: - # Baz - somekey: "012345678901234567890123456789012345678901234567890123456789012345678901234" # x -`, - to: ` -apiVersion: v1 -`, - expected: ` -# Top comment -# Comments lost during file manipulation: -# Field "data": "Bar" -# Field "somekey": "Baz" -# Field "somekey": "x" -# Field "kind": "Foo" - -apiVersion: v1 -`, - }, - } - - for i := range testCases { - tc := testCases[i] - t.Run(tc.name, func(t *testing.T) { - from, err := yaml.Parse(tc.from) - if !assert.NoError(t, err) { - t.FailNow() - } - - to, err := yaml.Parse(tc.to) - if !assert.NoError(t, err) { - t.FailNow() - } - - err = CopyComments(from, to, true) - if !assert.NoError(t, err) { - t.FailNow() - } - - actual, err := to.String() - if !assert.NoError(t, err) { - t.FailNow() - } - - if !assert.Equal(t, strings.TrimSpace(tc.expected), strings.TrimSpace(actual)) { - t.FailNow() - } - }) - } -} diff --git a/pkg/serializer/comments/lost.go b/pkg/serializer/comments/lost.go deleted file mode 100644 index f85fc1f7..00000000 --- a/pkg/serializer/comments/lost.go +++ /dev/null @@ -1,118 +0,0 @@ -package comments - -import ( - "fmt" - "strings" - - "sigs.k8s.io/kustomize/kyaml/yaml" -) - -// lostComment specifies a mapping between a fieldName (in the old structure), which doesn't exist in the -// new tree, and its related comment. 
It optionally specifies the line number of the comment, a positive -// line number is used to distinguish inline comments, which require special handling to resolve the -// correct field name, since they are attached to the value and not the key of a YAML key-value pair. -type lostComment struct { - fieldName string - comment string - line int -} - -// Since the YAML walker needs to visit all keys as scalar nodes, we have no way of distinguishing keys from -// values when trying to resolve the field names for inline comments. By tracking the leftmost key (lowest -// column value, be it a key or value) for each row, we can figure out the actual key for inline comments -// and not accidentally use a value as the field name, since keys are guaranteed to come before values. -type trackedKey struct { - name string - column int -} - -// trackKey compares the column position of the given node to the stored best (lowest) column position for the -// node's line and replaces the best if the given node is more likely to be a key (has a smaller column value). -func (c *copier) trackKey(node *yaml.Node) { - // If the given key doesn't have a smaller column value, return. - if key, ok := c.trackedKeys[node.Line]; ok { - if key.column < node.Column { - return - } - } - - // Store the new best tracked key for the line. - c.trackedKeys[node.Line] = trackedKey{ - name: node.Value, - column: node.Column, - } -} - -// parseComments parses the line, head and foot comments of the given node in this -// order and cleans them up (removes the potential "#" prefix and trims whitespace). 
-func parseComments(node *yaml.Node) (comments []string) { - for _, comment := range []string{node.LineComment, node.HeadComment, node.FootComment} { - comments = append(comments, strings.TrimSpace(strings.TrimPrefix(comment, "#"))) - } - - return -} - -// rememberLostComments goes through the comments attached to the 'from' node and adds -// them to the internal lostComments slice for usage after the tree walk. It also -// stores the line numbers for inline comments for resolving the correct field names. -func (c *copier) rememberLostComments(from *yaml.RNode) { - // Track the given node as a potential key for inline comments. - c.trackKey(from.Document()) - - // Get the field name, for head/foot comments this is the correct key, - // but for inline comments this resolves to the value of the field instead. - fieldName := from.Document().Value - comments := parseComments(from.Document()) - line := -1 // Don't store the line number of the comment by default, this is reserved for inline comments. - - for i, comment := range comments { - // If the line number is set (positive), an inline comment - // has been registered for this node and we can stop parsing. - if line >= 0 { - break - } - - // Do not store blank comment entries (nonexistent comments). - if len(comment) == 0 { - continue - } - - if i == 0 { - // If this node has an inline comment, store its line - // number for resolving the correct field name later. - line = from.Document().Line - } - - // Append the lost comment to the slice of copier. - c.lostComments = append(c.lostComments, lostComment{ - fieldName: fieldName, - comment: comment, - line: line, - }) - } -} - -// restoreLostComments writes the cached lost comments to the top of the to YAML tree. -// If it encounters inline comments, it will check the cached tracked keys for the -// best key for the line on which the comment resided. If no key is found for some -// reason, it will use the stored field name (the field value) as the key. 
-func (c *copier) restoreLostComments(to *yaml.RNode) { - for i, lc := range c.lostComments { - if i == 0 { - to.Document().HeadComment += "\nComments lost during file manipulation:" - } - - fieldName := lc.fieldName - if lc.line >= 0 { - // This is an inline comment, resolve the field name from the tracked keys. - if key, ok := c.trackedKeys[lc.line]; ok { - fieldName = key.name - } - } - - to.Document().HeadComment += fmt.Sprintf("\n# Field %q: %q", fieldName, lc.comment) - } - - to.Document().HeadComment = strings.TrimPrefix(to.Document().HeadComment, "\n") -} diff --git a/pkg/serializer/comments_test.go b/pkg/serializer/comments_test.go index 6332e5ca..7a03a25a 100644 --- a/pkg/serializer/comments_test.go +++ b/pkg/serializer/comments_test.go @@ -18,8 +18,8 @@ kind: Test spec: # Head comment data: - - field # Inline comment - - another + - field # Inline comment + - another thing: # Head comment var: true @@ -29,15 +29,15 @@ const sampleData2 = `kind: Test spec: # Head comment data: - - field # Inline comment - - another: - subthing: "yes" + - field # Inline comment + - another: + subthing: "yes" thing: # Head comment var: true status: nested: - fields: + fields: {} # Just a comment ` diff --git a/pkg/serializer/decode.go b/pkg/serializer/decode.go index 7f6cf116..7b4177da 100644 --- a/pkg/serializer/decode.go +++ b/pkg/serializer/decode.go @@ -1,16 +1,18 @@ package serializer import ( + "context" "fmt" "io" "reflect" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" "k8s.io/apimachinery/pkg/runtime/serializer/versioning" - serializeryaml "k8s.io/apimachinery/pkg/runtime/serializer/yaml" ) // This is the groupversionkind for the v1.List object @@ -62,17 +64,18 @@ func (d *decoder) GetLockedScheme() LockedScheme { // If opts.DecodeUnknown is true, any 
type with an unrecognized apiVersion/kind will be returned as a // *runtime.Unknown object instead of returning a UnrecognizedTypeError. // opts.DecodeListElements is not applicable in this call. -func (d *decoder) Decode(fr FrameReader) (runtime.Object, error) { +func (d *decoder) Decode(fr frame.Reader) (runtime.Object, error) { // Read a frame from the FrameReader // TODO: Make sure to test the case when doc might contain something, and err is io.EOF - doc, err := fr.ReadFrame() + ctx := context.TODO() + doc, err := fr.ReadFrame(ctx) if err != nil { return nil, err } return d.decode(doc, nil, fr.ContentType()) } -func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runtime.Object, error) { +func (d *decoder) decode(doc []byte, into runtime.Object, ct content.ContentType) (runtime.Object, error) { // If the scheme doesn't recognize a v1.List, and we enabled opts.DecodeListElements, // make the scheme able to decode the v1.List automatically if *d.opts.DecodeListElements { @@ -101,7 +104,7 @@ func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runti // Give the user good errors wrt missing group & version // TODO: It might be unnecessary to unmarshal twice (as we do in handleDecodeError), // as gvk was returned from Decode above. - return nil, d.handleDecodeError(doc, err) + return nil, d.handleDecodeError(gvk, err) } // Fail fast if object is nil @@ -142,10 +145,11 @@ func (d *decoder) decode(doc []byte, into runtime.Object, ct ContentType) (runti // // TODO: Support decoding all frames at once into e.g. PartialMetadataLists, UnstructuredLists, or // metav1.Lists. -func (d *decoder) DecodeInto(fr FrameReader, into runtime.Object) error { +func (d *decoder) DecodeInto(fr frame.Reader, into runtime.Object) error { // Read a frame from the FrameReader. 
// TODO: Make sure to test the case when doc might contain something, and err is io.EOF - doc, err := fr.ReadFrame() + ctx := context.TODO() + doc, err := fr.ReadFrame(ctx) if err != nil { return err } @@ -170,7 +174,7 @@ func (d *decoder) DecodeInto(fr FrameReader, into runtime.Object) error { // added into the returning slice. The v1.List will in this case not be returned. // If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a // *runtime.Unknown object instead of returning a UnrecognizedTypeError. -func (d *decoder) DecodeAll(fr FrameReader) ([]runtime.Object, error) { +func (d *decoder) DecodeAll(fr frame.Reader) ([]runtime.Object, error) { objs := []runtime.Object{} for { obj, err := d.Decode(fr) @@ -193,7 +197,7 @@ func (d *decoder) DecodeAll(fr FrameReader) ([]runtime.Object, error) { } // decodeUnknown decodes bytes of a certain content type into a returned *runtime.Unknown object -func (d *decoder) decodeUnknown(doc []byte, ct ContentType) (runtime.Object, error) { +func (d *decoder) decodeUnknown(doc []byte, ct content.ContentType) (runtime.Object, error) { // Do a DecodeInto the new pointer to the object we've got. The resulting into object is // also returned. // The content type isn't really used here, as runtime.Unknown will never implement @@ -201,15 +205,9 @@ func (d *decoder) decodeUnknown(doc []byte, ct ContentType) (runtime.Object, err return d.decode(doc, &runtime.Unknown{}, ct) } -func (d *decoder) handleDecodeError(doc []byte, origErr error) error { - // Parse the document's TypeMeta information - gvk, err := serializeryaml.DefaultMetaFactory.Interpret(doc) - if err != nil { - return fmt.Errorf("failed to interpret TypeMeta from the given the YAML: %v. Decode error was: %w", err, origErr) - } - +func (d *decoder) handleDecodeError(gvk *schema.GroupVersionKind, origErr error) error { // TODO: Unit test that typed errors are returned properly - + // TODO: Check for gvk == nil here? 
// Check if the group was known. If not, return that specific error if !d.Scheme().IsGroupRegistered(gvk.Group) { return NewUnrecognizedGroupError(*gvk, origErr) @@ -230,7 +228,7 @@ func (d *decoder) handleDecodeError(doc []byte, origErr error) error { return origErr } -func (d *decoder) extractNestedObjects(obj runtime.Object, ct ContentType) ([]runtime.Object, error) { +func (d *decoder) extractNestedObjects(obj runtime.Object, ct content.ContentType) ([]runtime.Object, error) { // If we didn't ask for list-unwrapping functionality, return directly if !*d.opts.DecodeListElements { return []runtime.Object{obj}, nil diff --git a/pkg/serializer/encode.go b/pkg/serializer/encode.go index 19f6b1ca..75a762c4 100644 --- a/pkg/serializer/encode.go +++ b/pkg/serializer/encode.go @@ -2,9 +2,13 @@ package serializer import ( "bytes" + "context" "encoding/json" + "io" "strings" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" k8sserializer "k8s.io/apimachinery/pkg/runtime/serializer" @@ -39,7 +43,7 @@ func (e *encoder) CodecFactory() *k8sserializer.CodecFactory { // if the given object is of an external version. // TODO: This should automatically convert to the preferred version // TODO: Fix that sometimes omitempty fields aren't respected -func (e *encoder) Encode(fw FrameWriter, objs ...runtime.Object) error { +func (e *encoder) Encode(fw frame.Writer, objs ...runtime.Object) error { for _, obj := range objs { // Get the kind for the given object gvk, err := GVKForObject(e.Scheme(), obj) @@ -67,11 +71,11 @@ func (e *encoder) Encode(fw FrameWriter, objs ...runtime.Object) error { // EncodeForGroupVersion encodes the given object for the specific groupversion. If the object // is not of that version currently it will try to convert. The output bytes are written to the // FrameWriter. The FrameWriter specifies the ContentType. 
-func (e *encoder) EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv schema.GroupVersion) error { +func (e *encoder) EncodeForGroupVersion(fw frame.Writer, obj runtime.Object, gv schema.GroupVersion) error { // Get the serializer for the media type serializerInfo, ok := runtime.SerializerInfoForMediaType(e.codecs.SupportedMediaTypes(), string(fw.ContentType())) if !ok { - return ErrUnsupportedContentType + return content.ErrUnsupportedContentType(fw.ContentType()) // TODO: Say what content types are supported } // Choose the default, non-pretty serializer, as we prettify if needed later @@ -85,18 +89,22 @@ func (e *encoder) EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv s // Get a version-specific encoder for the specified groupversion versionEncoder := encoderForVersion(e.Scheme(), encoder, gv) + ctx := context.TODO() + wc := frame.ToIoWriteCloser(ctx, fw) + // Check if the user requested prettified JSON output. // If the ContentType is JSON this is ok, we will intent the encode output on the fly. - if *e.opts.JSONIndent > 0 && fw.ContentType() == ContentTypeJSON { - fw = &jsonPrettyFrameWriter{indent: *e.opts.JSONIndent, fw: fw} + if *e.opts.JSONIndent > 0 && fw.ContentType() == content.ContentTypeJSON { + wc = &jsonPrettyWriter{indent: *e.opts.JSONIndent, wc: wc} } // Cast the object to a metav1.Object to get access to annotations metaobj, ok := toMetaObject(obj) // For objects without ObjectMeta, the cast will fail. 
Allow that failure and do "normal" encoding if !ok { - return versionEncoder.Encode(obj, fw) + return versionEncoder.Encode(obj, wc) } + // TODO: Document that the frame.Writer is not closed // Specialize the encoder for a specific gv and encode the object return e.encodeWithCommentSupport(versionEncoder, fw, obj, metaobj) @@ -115,12 +123,12 @@ func encoderForVersion(scheme *runtime.Scheme, encoder runtime.Encoder, gv schem ) } -type jsonPrettyFrameWriter struct { +type jsonPrettyWriter struct { indent int32 - fw FrameWriter + wc io.WriteCloser } -func (w *jsonPrettyFrameWriter) Write(p []byte) (n int, err error) { +func (w *jsonPrettyWriter) Write(p []byte) (n int, err error) { // Indent the source bytes var indented bytes.Buffer err = json.Indent(&indented, p, "", strings.Repeat(" ", int(w.indent))) @@ -128,10 +136,10 @@ func (w *jsonPrettyFrameWriter) Write(p []byte) (n int, err error) { return } // Write the pretty bytes to the underlying writer - n, err = w.fw.Write(indented.Bytes()) + n, err = w.wc.Write(indented.Bytes()) return } -func (w *jsonPrettyFrameWriter) ContentType() ContentType { - return w.fw.ContentType() +func (w *jsonPrettyWriter) Close() error { + return w.wc.Close() } diff --git a/pkg/serializer/error_structs.go b/pkg/serializer/error_structs.go deleted file mode 100644 index 11109b37..00000000 --- a/pkg/serializer/error_structs.go +++ /dev/null @@ -1,52 +0,0 @@ -package serializer - -var _ ReadCloser = &errReadCloser{} - -type errReadCloser struct { - err error -} - -func (rc *errReadCloser) Read(p []byte) (n int, err error) { - err = rc.err - return -} - -func (rc *errReadCloser) Close() error { - return nil -} - -var _ FrameReader = &errFrameReader{} - -type errFrameReader struct { - err error - contentType ContentType -} - -func (fr *errFrameReader) ReadFrame() ([]byte, error) { - return nil, fr.err -} - -func (fr *errFrameReader) ContentType() ContentType { - return fr.contentType -} - -// Close implements io.Closer and closes the 
underlying ReadCloser -func (fr *errFrameReader) Close() error { - return nil -} - -var _ FrameWriter = &errFrameWriter{} - -type errFrameWriter struct { - err error - contentType ContentType -} - -func (fw *errFrameWriter) Write(_ []byte) (n int, err error) { - err = fw.err - return -} - -func (fw *errFrameWriter) ContentType() ContentType { - return fw.contentType -} diff --git a/pkg/serializer/frame_reader.go b/pkg/serializer/frame_reader.go deleted file mode 100644 index c9fcd817..00000000 --- a/pkg/serializer/frame_reader.go +++ /dev/null @@ -1,204 +0,0 @@ -package serializer - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "os" - "sync" - - "k8s.io/apimachinery/pkg/runtime/serializer/json" -) - -const ( - defaultBufSize = 64 * 1024 // 64 kB - defaultMaxFrameSize = 16 * 1024 * 1024 // 16 MB -) - -var ( - // FrameOverflowErr is returned from FrameReader.ReadFrame when one frame exceeds the - // maximum size of 16 MB. - FrameOverflowErr = errors.New("frame was larger than maximum allowed size") -) - -// ReadCloser in this package is an alias for io.ReadCloser. It helps in Godoc to locate -// helpers in this package which returns writers (i.e. FromFile and FromBytes) -type ReadCloser io.ReadCloser - -// FrameReader is a content-type specific reader of a given ReadCloser. -// The FrameReader reads frames from the underlying ReadCloser and returns them for consumption. -// When io.EOF is reached, the stream is closed automatically. -type FrameReader interface { - ContentTyped - io.Closer - - // ReadFrame reads frames from the underlying ReadCloser and returns them for consumption. - // When io.EOF is reached, the stream is closed automatically. - ReadFrame() ([]byte, error) -} - -// FrameReaderFactory knows how to create various different FrameReaders for -// given ContentTypes. 
-type FrameReaderFactory interface { - // NewFrameReader returns a new FrameReader for the given ContentType, - // and ReadCloser that contains the underlying data that should be read. - NewFrameReader(contentType ContentType, rc ReadCloser) FrameReader -} - -// defaultFrameReaderFactory is the variable used in public methods. -var defaultFrameReaderFactory FrameReaderFactory = frameReaderFactory{} - -// frameReaderFactory is the default implementation of FrameReaderFactory. -type frameReaderFactory struct{} - -// Documentation below attached to NewFrameReader. -func (frameReaderFactory) NewFrameReader(contentType ContentType, rc ReadCloser) FrameReader { - switch contentType { - case ContentTypeYAML: - return newFrameReader(json.YAMLFramer.NewFrameReader(rc), contentType) - case ContentTypeJSON: - return newFrameReader(json.Framer.NewFrameReader(rc), contentType) - default: - return &errFrameReader{ErrUnsupportedContentType, contentType} - } -} - -// NewFrameReaderFactory returns the default variant of FrameReaderFactory capable -// of creating YAML- and JSON-compatible FrameReaders. -func NewFrameReaderFactory() FrameReaderFactory { - return frameReaderFactory{} -} - -// NewFrameReader returns a FrameReader for the given ContentType and data in the -// ReadCloser. The Reader is automatically closed in io.EOF. ReadFrame is called -// once each Decoder.Decode() or Decoder.DecodeInto() call. When Decoder.DecodeAll() is -// called, the FrameReader is read until io.EOF, upon where it is closed. -func NewFrameReader(contentType ContentType, rc ReadCloser) FrameReader { - return defaultFrameReaderFactory.NewFrameReader(contentType, rc) -} - -// NewYAMLFrameReader returns a FrameReader that supports both YAML and JSON. 
Frames are separated by "---\n" -// -// This call is the same as NewFrameReader(ContentTypeYAML, rc) -func NewYAMLFrameReader(rc ReadCloser) FrameReader { - return NewFrameReader(ContentTypeYAML, rc) -} - -// NewJSONFrameReader returns a FrameReader that supports both JSON. Objects are read from the stream one-by-one, -// each object making up its own frame. -// -// This call is the same as NewFrameReader(ContentTypeJSON, rc) -func NewJSONFrameReader(rc ReadCloser) FrameReader { - return NewFrameReader(ContentTypeJSON, rc) -} - -// newFrameReader returns a new instance of the frameReader struct -func newFrameReader(rc io.ReadCloser, contentType ContentType) *frameReader { - return &frameReader{ - rc: rc, - rcMu: &sync.Mutex{}, - bufSize: defaultBufSize, - maxFrameSize: defaultMaxFrameSize, - contentType: contentType, - } -} - -// frameReader is a FrameReader implementation -type frameReader struct { - // the underlying readcloser and the mutex that guards it - rc io.ReadCloser - rcMu *sync.Mutex - - bufSize int - maxFrameSize int - contentType ContentType -} - -// ReadFrame reads one frame from the underlying io.Reader. ReadFrame -// keeps on reading from the Reader in bufSize blocks, until the Reader either -// returns err == nil or EOF. If the Reader reports an ErrShortBuffer error, -// ReadFrame keeps on reading using new calls. ReadFrame might return both data and -// io.EOF. io.EOF will be returned in the final call. -func (rf *frameReader) ReadFrame() (frame []byte, err error) { - // Only one actor can read at a time - rf.rcMu.Lock() - defer rf.rcMu.Unlock() - - // Temporary buffer to parts of a frame into - var buf []byte - // How many bytes were read by the read call - var n int - // Multiplier for bufsize - c := 1 - for { - // Allocate a buffer of a multiple of bufSize. - buf = make([]byte, c*rf.bufSize) - // Call the underlying reader. 
- n, err = rf.rc.Read(buf) - // Append the returned bytes to the b slice returned - // If n is 0, this call is a no-op - frame = append(frame, buf[:n]...) - - // If the frame got bigger than the max allowed size, return and report the error - if len(frame) > rf.maxFrameSize { - err = FrameOverflowErr - return - } - - // Handle different kinds of errors - switch err { - case io.ErrShortBuffer: - // ignore the "buffer too short" error, and just keep on reading, now doubling the buffer - c *= 2 - continue - case nil: - // One document is "done reading", we should return it if valid - // Only return non-empty documents, i.e. skip e.g. leading `---` - if len(bytes.TrimSpace(frame)) > 0 { - // valid non-empty document - return - } - // The document was empty, reset the frame (just to be sure) and continue - frame = nil - continue - case io.EOF: - // we reached the end of the file, close the reader and return - rf.rc.Close() - return - default: - // unknown error, return it immediately - // TODO: Maybe return the error here? - return - } - } -} - -// ContentType returns the content type for the given FrameReader -func (rf *frameReader) ContentType() ContentType { - return rf.contentType -} - -// Close implements io.Closer and closes the underlying ReadCloser -func (rf *frameReader) Close() error { - // Only one actor can access rf.rc at a time - rf.rcMu.Lock() - defer rf.rcMu.Unlock() - - return rf.rc.Close() -} - -// FromFile returns a ReadCloser from the given file, or a ReadCloser which returns -// the given file open error when read. -func FromFile(filePath string) ReadCloser { - f, err := os.Open(filePath) - if err != nil { - return &errReadCloser{err} - } - return f -} - -// FromBytes returns a ReadCloser from the given byte content. 
-func FromBytes(content []byte) ReadCloser { - return ioutil.NopCloser(bytes.NewReader(content)) -} diff --git a/pkg/serializer/frame_reader_test.go b/pkg/serializer/frame_reader_test.go deleted file mode 100644 index 063ed8a0..00000000 --- a/pkg/serializer/frame_reader_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package serializer - -import ( - "io" - "io/ioutil" - "reflect" - "strings" - "sync" - "testing" - - "k8s.io/apimachinery/pkg/runtime/serializer/json" -) - -const ( - fooYAML = `kind: Foo -apiVersion: bar/v1 -a: b1234567890 -c: d1234567890 -e: f1234567890 -hello: true` - - barYAML = `kind: Bar -apiVersion: foo/v1 -a: b1234567890 -c: d1234567890 -e: f1234567890 -hello: false` - - bazYAML = `baz: true` - - testYAML = "\n---\n" + fooYAML + "\n---\n" + barYAML + "\n---\n" + bazYAML -) - -func Test_FrameReader_ReadFrame(t *testing.T) { - testYAMLReadCloser := json.YAMLFramer.NewFrameReader(ioutil.NopCloser(strings.NewReader(testYAML))) - - type fields struct { - rc io.ReadCloser - bufSize int - maxFrameSize int - } - type result struct { - wantB []byte - wantErr bool - } - tests := []struct { - name string - fields fields - wants []result - }{ - { - name: "three-document YAML case", - fields: fields{ - rc: testYAMLReadCloser, - bufSize: 16, - maxFrameSize: 1024, - }, - wants: []result{ - { - wantB: []byte(fooYAML), - wantErr: false, - }, - { - wantB: []byte(barYAML), - wantErr: false, - }, - { - wantB: []byte(bazYAML), - wantErr: false, - }, - { - wantB: nil, - wantErr: true, - }, - }, - }, - { - name: "maximum size reached", - fields: fields{ - rc: testYAMLReadCloser, - bufSize: 16, - maxFrameSize: 32, - }, - wants: []result{ - { - wantB: nil, - wantErr: true, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rf := &frameReader{ - rc: tt.fields.rc, - rcMu: &sync.Mutex{}, - bufSize: tt.fields.bufSize, - maxFrameSize: tt.fields.maxFrameSize, - } - for _, expected := range tt.wants { - gotB, err := rf.ReadFrame() - if (err != 
nil) != expected.wantErr { - t.Errorf("frameReader.ReadFrame() error = %v, wantErr %v", err, expected.wantErr) - return - } - if len(gotB) < len(expected.wantB) { - t.Errorf("frameReader.ReadFrame(): got smaller slice %v than expected %v", gotB, expected.wantB) - return - } - if !reflect.DeepEqual(gotB[:len(expected.wantB)], expected.wantB) { - t.Errorf("frameReader.ReadFrame() = %v, want %v", gotB, expected.wantB) - } - } - }) - } -} diff --git a/pkg/serializer/frame_single.go b/pkg/serializer/frame_single.go deleted file mode 100644 index 9d50da27..00000000 --- a/pkg/serializer/frame_single.go +++ /dev/null @@ -1,84 +0,0 @@ -package serializer - -import ( - "io" - "sync/atomic" -) - -// NewSingleFrameReader returns a FrameReader for only a single frame of -// the specified content type. This avoids overhead if it is known that the -// byte array only contains one frame. The given frame is returned in -// whole in the first ReadFrame() call, and io.EOF is returned in all future -// invocations. This FrameReader works for any ContentType and transparently -// exposes the given content type through the ContentType() method. -// This implementation is thread-safe. -func NewSingleFrameReader(b []byte, ct ContentType) FrameReader { - return &singleFrameReader{ - ct: ct, - b: b, - hasBeenRead: 0, - } -} - -// singleFrameReader implements the FrameReader interface. -var _ FrameReader = &singleFrameReader{} - -type singleFrameReader struct { - ct ContentType - b []byte - hasBeenRead uint32 -} - -func (r *singleFrameReader) ReadFrame() ([]byte, error) { - // The first time this function executes; hasBeenRead == 0. The atomic compare-and-swap - // operation checks if hasBeenRead == 0, and if so, sets it to one and returns true. - // This means that r.b will ever only be returned exactly once, as all the other cases - // (when hasBeenRead == 1), the compare-and-swap operation will return false => io.EOF. 
- if atomic.CompareAndSwapUint32(&r.hasBeenRead, 0, 1) { - // The first time, return the single frame we store - return r.b, nil - } - return nil, io.EOF -} - -func (r *singleFrameReader) ContentType() ContentType { return r.ct } -func (r *singleFrameReader) Close() error { return nil } - -// NewSingleFrameWriter returns a FrameWriter for only a single frame of -// the specified content type, using the underlying Writer. This FrameWriter -// will only ever write once; any successive calls will result in a io.ErrClosedPipe. -// This FrameWriter works for any ContentType and transparently exposes the given -// content type through the ContentType() method. -// This implementation is thread-safe. -func NewSingleFrameWriter(w Writer, ct ContentType) FrameWriter { - return &singleFrameWriter{ - ct: ct, - w: w, - hasBeenWritten: 0, - } -} - -// singleFrameWriter implements the FrameWriter interface. -var _ FrameWriter = &singleFrameWriter{} - -type singleFrameWriter struct { - ct ContentType - w Writer - hasBeenWritten uint32 -} - -func (r *singleFrameWriter) Write(p []byte) (n int, err error) { - // The first time this function executes; hasBeenWritten == 0. The atomic compare-and-swap - // operation checks if hasBeenWritten == 0, and if so, sets it to one and returns true. - // This means that r.b will ever only be returned exactly once, as all the other cases - // (when hasBeenWritten == 1), the compare-and-swap operation will return false => io.ErrClosedPipe. 
- if atomic.CompareAndSwapUint32(&r.hasBeenWritten, 0, 1) { - // The first time, write to the underlying writer - n, err = r.w.Write(p) - return - } - err = io.ErrClosedPipe - return -} - -func (r *singleFrameWriter) ContentType() ContentType { return r.ct } diff --git a/pkg/serializer/frame_utils.go b/pkg/serializer/frame_utils.go deleted file mode 100644 index 12c65e16..00000000 --- a/pkg/serializer/frame_utils.go +++ /dev/null @@ -1,37 +0,0 @@ -package serializer - -import "io" - -// FrameList is a list of frames (byte arrays), used for convenience functions -type FrameList [][]byte - -// ReadFrameList is a convenience method that reads all available frames from the FrameReader -// into a returned FrameList -func ReadFrameList(fr FrameReader) (FrameList, error) { - // TODO: Create an unit test for this function - var frameList [][]byte - for { - // Read until we get io.EOF or an error - frame, err := fr.ReadFrame() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - // Append all frames to the returned list - frameList = append(frameList, frame) - } - return frameList, nil -} - -// WriteFrameList is a convenience method that writes a set of frames to a FrameWriter -func WriteFrameList(fw FrameWriter, frameList FrameList) error { - // TODO: Create an unit test for this function - // Loop all frames in the list, and write them individually to the FrameWriter - for _, frame := range frameList { - if _, err := fw.Write(frame); err != nil { - return err - } - } - return nil -} diff --git a/pkg/serializer/frame_writer.go b/pkg/serializer/frame_writer.go deleted file mode 100644 index d2f0fc45..00000000 --- a/pkg/serializer/frame_writer.go +++ /dev/null @@ -1,128 +0,0 @@ -package serializer - -import ( - "io" -) - -const ( - yamlSeparator = "---\n" -) - -// Writer in this package is an alias for io.Writer. It helps in Godoc to locate -// helpers in this package which returns writers (i.e. 
ToBytes) -type Writer io.Writer - -// FrameWriter is a ContentType-specific io.Writer that writes given frames in an applicable way -// to an underlying io.Writer stream -type FrameWriter interface { - ContentTyped - Writer -} - -// NewFrameWriter returns a new FrameWriter for the given Writer and ContentType -func NewFrameWriter(contentType ContentType, w Writer) FrameWriter { - switch contentType { - case ContentTypeYAML: - // Use our own implementation of the underlying YAML FrameWriter - return &frameWriter{newYAMLWriter(w), contentType} - case ContentTypeJSON: - // Comment from k8s.io/apimachinery/pkg/runtime/serializer/json.Framer.NewFrameWriter: - // "we can write JSON objects directly to the writer, because they are self-framing" - // Hence, we directly use w without any modifications. - return &frameWriter{w, contentType} - default: - return &errFrameWriter{ErrUnsupportedContentType, contentType} - } -} - -// NewYAMLFrameWriter returns a FrameWriter that writes YAML frames separated by "---\n" -// -// This call is the same as NewFrameWriter(ContentTypeYAML, w) -func NewYAMLFrameWriter(w Writer) FrameWriter { - return NewFrameWriter(ContentTypeYAML, w) -} - -// NewJSONFrameWriter returns a FrameWriter that writes JSON frames without separation -// (i.e. "{ ... }{ ... }{ ... 
}" on the wire) -// -// This call is the same as NewFrameWriter(ContentTypeYAML, w) -func NewJSONFrameWriter(w Writer) FrameWriter { - return NewFrameWriter(ContentTypeJSON, w) -} - -// frameWriter is an implementation of the FrameWriter interface -type frameWriter struct { - Writer - - contentType ContentType - - // TODO: Maybe add mutexes for thread-safety (so no two goroutines write at the same time) -} - -// ContentType returns the content type for the given FrameWriter -func (wf *frameWriter) ContentType() ContentType { - return wf.contentType -} - -// newYAMLWriter returns a new yamlWriter implementation -func newYAMLWriter(w Writer) *yamlWriter { - return &yamlWriter{ - w: w, - hasWritten: false, - } -} - -// yamlWriter writes yamlSeparator between documents -type yamlWriter struct { - w io.Writer - hasWritten bool -} - -// Write implements io.Writer -func (w *yamlWriter) Write(p []byte) (n int, err error) { - // If we've already written some documents, add the separator in between - if w.hasWritten { - _, err = w.w.Write([]byte(yamlSeparator)) - if err != nil { - return - } - } - - // Write the given bytes to the underlying writer - n, err = w.w.Write(p) - if err != nil { - return - } - - // Mark that we've now written once and should write the separator in between - w.hasWritten = true - return -} - -// ToBytes returns a Writer which can be passed to NewFrameWriter. The Writer writes directly -// to an underlying byte array. The byte array must be of enough length in order to write. 
-func ToBytes(p []byte) Writer { - return &byteWriter{p, 0} -} - -type byteWriter struct { - to []byte - // the next index to write to - index int -} - -func (w *byteWriter) Write(from []byte) (n int, err error) { - // Check if we have space in to, in order to write bytes there - if w.index+len(from) > len(w.to) { - err = io.ErrShortBuffer - return - } - // Copy over the bytes one by one - for i := range from { - w.to[w.index+i] = from[i] - } - // Increase the index for the next Write call's target position - w.index += len(from) - n += len(from) - return -} diff --git a/pkg/serializer/frame_writer_test.go b/pkg/serializer/frame_writer_test.go deleted file mode 100644 index 988dacbc..00000000 --- a/pkg/serializer/frame_writer_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package serializer - -import ( - "bytes" - "testing" -) - -func Test_byteWriter_Write(t *testing.T) { - type fields struct { - to []byte - index int - } - type args struct { - from []byte - } - tests := []struct { - name string - fields fields - args args - wantN int - wantErr bool - }{ - { - name: "simple case", - fields: fields{ - to: make([]byte, 50), - }, - args: args{ - from: []byte("Hello!\nFoobar"), - }, - wantN: 13, - wantErr: false, - }, - { - name: "target too short", - fields: fields{ - to: make([]byte, 10), - }, - args: args{ - from: []byte("Hello!\nFoobar"), - }, - wantN: 0, - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - w := &byteWriter{ - to: tt.fields.to, - index: tt.fields.index, - } - gotN, err := w.Write(tt.args.from) - if (err != nil) != tt.wantErr { - t.Errorf("byteWriter.Write() error = %v, wantErr %v", err, tt.wantErr) - return - } - if gotN != tt.wantN { - t.Errorf("byteWriter.Write() = %v, want %v", gotN, tt.wantN) - return - } - if !tt.wantErr && !bytes.Equal(tt.fields.to[:gotN], tt.args.from) { - t.Errorf("byteWriter.Write(): expected fields.to (%s) to equal args.from (%s), but didn't", tt.fields.to[:gotN], tt.args.from) - } - }) 
- } -} diff --git a/pkg/serializer/patch.go b/pkg/serializer/patch.go index d6987334..c2b17379 100644 --- a/pkg/serializer/patch.go +++ b/pkg/serializer/patch.go @@ -5,6 +5,8 @@ import ( "encoding/json" "errors" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/util/patch" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/strategicpatch" @@ -93,7 +95,7 @@ func (p *patcher) ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj // Encode without conversion to the buffer var buf bytes.Buffer - if err := p.encoder.EncodeForGroupVersion(NewJSONFrameWriter(&buf), obj, gvk.GroupVersion()); err != nil { + if err := p.encoder.EncodeForGroupVersion(frame.NewJSONWriter(content.ToBuffer(&buf)), obj, gvk.GroupVersion()); err != nil { return err } @@ -110,7 +112,7 @@ func (p *patcher) ApplyOnStruct(bytePatcher patch.BytePatcher, patch []byte, obj } // Decode into the object to apply the changes - fr := NewSingleFrameReader(newJSON, ContentTypeJSON) + fr := frame.NewSingleJSONReader(content.FromBytes(newJSON)) if err := p.decoder.DecodeInto(fr, obj); err != nil { return err } diff --git a/pkg/serializer/serializer.go b/pkg/serializer/serializer.go index 8f8b5fdf..c1a4ca8f 100644 --- a/pkg/serializer/serializer.go +++ b/pkg/serializer/serializer.go @@ -3,11 +3,13 @@ package serializer import ( "errors" + "github.com/weaveworks/libgitops/pkg/frame" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" k8sserializer "k8s.io/apimachinery/pkg/runtime/serializer" ) +/* // ContentType specifies a content type for Encoders, Decoders, FrameWriters and FrameReaders type ContentType string @@ -19,15 +21,16 @@ const ( // ContentTypeYAML specifies usage of YAML as the content type. 
// It is an alias for k8s.io/apimachinery/pkg/runtime.ContentTypeYAML ContentTypeYAML = ContentType(runtime.ContentTypeYAML) -) +)*/ var ( // ErrUnsupportedContentType is returned if the specified content type isn't supported - ErrUnsupportedContentType = errors.New("unsupported content type") + //ErrUnsupportedContentType = errors.New("unsupported content type") // ErrObjectIsNotList is returned when a runtime.Object was not a List type ErrObjectIsNotList = errors.New("given runtime.Object is not a *List type, or does not implement metav1.ListInterface") ) +/* // ContentTyped is an interface for objects that are specific to a set ContentType. type ContentTyped interface { // ContentType returns the ContentType (usually ContentTypeYAML or ContentTypeJSON) for the given object. @@ -35,7 +38,7 @@ type ContentTyped interface { } func (ct ContentType) ContentType() ContentType { return ct } - +*/ // Serializer is an interface providing high-level decoding/encoding functionality // for types registered in a *runtime.Scheme type Serializer interface { @@ -77,12 +80,12 @@ type Encoder interface { // The FrameWriter specifies the ContentType. This encoder will automatically convert any // internal object given to the preferred external groupversion. No conversion will happen // if the given object is of an external version. - Encode(fw FrameWriter, obj ...runtime.Object) error + Encode(fw frame.Writer, obj ...runtime.Object) error // EncodeForGroupVersion encodes the given object for the specific groupversion. If the object // is not of that version currently it will try to convert. The output bytes are written to the // FrameWriter. The FrameWriter specifies the ContentType. 
- EncodeForGroupVersion(fw FrameWriter, obj runtime.Object, gv schema.GroupVersion) error + EncodeForGroupVersion(fw frame.Writer, obj runtime.Object, gv schema.GroupVersion) error // SchemeLock exposes the underlying LockedScheme GetLockedScheme() LockedScheme @@ -110,7 +113,7 @@ type Decoder interface { // If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a // *runtime.Unknown object instead of returning a UnrecognizedTypeError. // opts.DecodeListElements is not applicable in this call. - Decode(fr FrameReader) (runtime.Object, error) + Decode(fr frame.Reader) (runtime.Object, error) // DecodeInto decodes the next document in the FrameReader stream into obj if the types are matching. // If there are multiple documents in the underlying stream, this call will read one @@ -129,7 +132,7 @@ type Decoder interface { // opts.DecodeUnknown is not applicable in this call. In case you want to decode an object into a // *runtime.Unknown, just create a runtime.Unknown object and pass the pointer as obj into DecodeInto // and it'll work. - DecodeInto(fr FrameReader, obj runtime.Object) error + DecodeInto(fr frame.Reader, obj runtime.Object) error // DecodeAll returns the decoded objects from all documents in the FrameReader stream. The underlying // stream is automatically closed on io.EOF. io.EOF is never returned from this function. @@ -146,7 +149,7 @@ type Decoder interface { // added into the returning slice. The v1.List will in this case not be returned. // If opts.DecodeUnknown is true, any type with an unrecognized apiVersion/kind will be returned as a // *runtime.Unknown object instead of returning a UnrecognizedTypeError. 
- DecodeAll(fr FrameReader) ([]runtime.Object, error) + DecodeAll(fr frame.Reader) ([]runtime.Object, error) // SchemeLock exposes the underlying LockedScheme GetLockedScheme() LockedScheme diff --git a/pkg/serializer/serializer_test.go b/pkg/serializer/serializer_test.go index c475ec7c..260e6e16 100644 --- a/pkg/serializer/serializer_test.go +++ b/pkg/serializer/serializer_test.go @@ -7,6 +7,9 @@ import ( "strings" "testing" + "github.com/stretchr/testify/assert" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" @@ -30,9 +33,10 @@ var ( ext1gv = schema.GroupVersion{Group: groupname, Version: "v1alpha1"} ext2gv = schema.GroupVersion{Group: groupname, Version: "v1alpha2"} - intsb = runtime.NewSchemeBuilder(addInternalTypes) - ext1sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext1gv), v1_addDefaultingFuncs, registerOldCRD) - ext2sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext2gv), v2_addDefaultingFuncs, registerNewCRD) + intsb = runtime.NewSchemeBuilder(addInternalTypes) + ext1sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext1gv), v1_addDefaultingFuncs, registerOldCRD) + ext2sb = runtime.NewSchemeBuilder(registerConversions, addExternalTypes(ext2gv), v2_addDefaultingFuncs, registerNewCRD) + yamlSep = []byte("---\n") ) func v1_addDefaultingFuncs(scheme *runtime.Scheme) error { @@ -251,38 +255,45 @@ var ( newCRDMeta = metav1.TypeMeta{APIVersion: "foogroup/v1alpha2", Kind: "CRD"} unknownMeta = runtime.TypeMeta{APIVersion: "unknown/v1", Kind: "YouDontRecognizeMe"} - oneSimple = []byte(`apiVersion: foogroup/v1alpha1 + oneSimple = []byte(`--- +apiVersion: foogroup/v1alpha1 kind: Simple testString: foo `) - simpleUnknownField = []byte(`apiVersion: foogroup/v1alpha1 + simpleUnknownField = []byte(`--- +apiVersion: foogroup/v1alpha1 kind: Simple 
testString: foo unknownField: bar `) - simpleDuplicateField = []byte(`apiVersion: foogroup/v1alpha1 + simpleDuplicateField = []byte(`--- +apiVersion: foogroup/v1alpha1 kind: Simple testString: foo testString: bar `) - unrecognizedVersion = []byte(`apiVersion: foogroup/v1alpha0 + unrecognizedVersion = []byte(`--- +apiVersion: foogroup/v1alpha0 kind: Simple testString: foo `) - unrecognizedGVK = []byte(`apiVersion: unknown/v1 + unrecognizedGVK = []byte(`--- +apiVersion: unknown/v1 kind: YouDontRecognizeMe testFooBar: true `) - oneComplex = []byte(`Int64: 0 + oneComplex = []byte(`--- +Int64: 0 apiVersion: foogroup/v1alpha1 bool: false int: 0 kind: Complex string: bar `) - simpleAndComplex = []byte(string(oneSimple) + "---\n" + string(oneComplex)) + simpleAndComplex = []byte(string(oneSimple) + string(oneComplex)) - testList = []byte(`apiVersion: v1 + testList = []byte(`--- +apiVersion: v1 kind: List items: - apiVersion: foogroup/v1alpha1 @@ -303,7 +314,8 @@ items: complexJSON = []byte(`{"apiVersion":"foogroup/v1alpha1","kind":"Complex","string":"bar","int":0,"Int64":0,"bool":false} `) - oldCRD = []byte(`# I'm a top comment + oldCRD = []byte(`--- +# I'm a top comment apiVersion: foogroup/v1alpha1 kind: CRD metadata: @@ -312,14 +324,16 @@ metadata: testString: foobar # Me too `) - oldCRDNoComments = []byte(`apiVersion: foogroup/v1alpha1 + oldCRDNoComments = []byte(`--- +apiVersion: foogroup/v1alpha1 kind: CRD metadata: creationTimestamp: null testString: foobar `) - newCRD = []byte(`# I'm a top comment + newCRD = []byte(`--- +# I'm a top comment apiVersion: foogroup/v1alpha2 kind: CRD metadata: @@ -328,7 +342,8 @@ metadata: otherString: foobar # Me too `) - newCRDNoComments = []byte(`apiVersion: foogroup/v1alpha2 + newCRDNoComments = []byte(`--- +apiVersion: foogroup/v1alpha2 kind: CRD metadata: creationTimestamp: null @@ -342,34 +357,30 @@ func TestEncode(t *testing.T) { oldCRDObj := &CRDOldVersion{TestString: "foobar"} newCRDObj := &CRDNewVersion{OtherString: 
"foobar"} tests := []struct { - name string - ct ContentType - objs []runtime.Object - expected []byte - expectedErr bool + name string + ct content.ContentType + objs []runtime.Object + want []byte + wantErr error }{ - {"simple yaml", ContentTypeYAML, []runtime.Object{simpleObj}, oneSimple, false}, - {"complex yaml", ContentTypeYAML, []runtime.Object{complexObj}, oneComplex, false}, - {"both simple and complex yaml", ContentTypeYAML, []runtime.Object{simpleObj, complexObj}, simpleAndComplex, false}, - {"simple json", ContentTypeJSON, []runtime.Object{simpleObj}, simpleJSON, false}, - {"complex json", ContentTypeJSON, []runtime.Object{complexObj}, complexJSON, false}, - {"old CRD yaml", ContentTypeYAML, []runtime.Object{oldCRDObj}, oldCRDNoComments, false}, - {"new CRD yaml", ContentTypeYAML, []runtime.Object{newCRDObj}, newCRDNoComments, false}, + {"simple yaml", content.ContentTypeYAML, []runtime.Object{simpleObj}, oneSimple, nil}, + {"complex yaml", content.ContentTypeYAML, []runtime.Object{complexObj}, oneComplex, nil}, + {"both simple and complex yaml", content.ContentTypeYAML, []runtime.Object{simpleObj, complexObj}, simpleAndComplex, nil}, + {"simple json", content.ContentTypeJSON, []runtime.Object{simpleObj}, simpleJSON, nil}, + {"complex json", content.ContentTypeJSON, []runtime.Object{complexObj}, complexJSON, nil}, + {"old CRD yaml", content.ContentTypeYAML, []runtime.Object{oldCRDObj}, oldCRDNoComments, nil}, + {"new CRD yaml", content.ContentTypeYAML, []runtime.Object{newCRDObj}, newCRDNoComments, nil}, //{"no-conversion simple", defaultEncoder, &runtimetest.ExternalSimple{TestString: "foo"}, simpleJSON, false}, //{"support internal", defaultEncoder, []runtime.Object{simpleObj}, []byte(`{"testString":"foo"}` + "\n"), false}, } for _, rt := range tests { t.Run(rt.name, func(t2 *testing.T) { - buf := new(bytes.Buffer) - actualErr := defaultEncoder.Encode(NewFrameWriter(rt.ct, buf), rt.objs...) 
- actual := buf.Bytes() - if (actualErr != nil) != rt.expectedErr { - t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actualErr != nil, actualErr) - } - if !bytes.Equal(actual, rt.expected) { - t2.Errorf("expected %q but actual %q", string(rt.expected), string(actual)) - } + var buf bytes.Buffer + cw := content.ToBuffer(&buf, content.WithContentType(rt.ct)) + err := defaultEncoder.Encode(frame.NewRecognizingWriter(cw), rt.objs...) + assert.ErrorIs(t, err, rt.wantErr) + assert.Equal(t, string(rt.want), buf.String()) }) } } @@ -381,8 +392,8 @@ func TestDecode(t *testing.T) { data []byte doDefaulting bool doConversion bool - expected runtime.Object - expectedErr bool + want runtime.Object + wantErr bool }{ {"old CRD hub conversion", oldCRD, false, true, &CRDNewVersion{newCRDMeta, metav1.ObjectMeta{}, "Old string foobar"}, false}, {"old CRD no conversion", oldCRD, false, false, &CRDOldVersion{oldCRDMeta, metav1.ObjectMeta{}, "foobar"}, false}, @@ -401,16 +412,12 @@ func TestDecode(t *testing.T) { for _, rt := range tests { t.Run(rt.name, func(t2 *testing.T) { - obj, actual := ourserializer.Decoder( + obj, err := ourserializer.Decoder( DefaultAtDecode(rt.doDefaulting), ConvertToHub(rt.doConversion), - ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) - if (actual != nil) != rt.expectedErr { - t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) - } - if rt.expected != nil && !reflect.DeepEqual(obj, rt.expected) { - t2.Errorf("expected %#v but actual %#v", rt.expected, obj) - } + ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data))) + assert.Equal(t, err != nil, rt.wantErr) + assert.Equal(t, rt.want, obj) }) } } @@ -433,8 +440,8 @@ func TestDecodeInto(t *testing.T) { {"complex external", oneComplex, false, &runtimetest.ExternalComplex{}, &runtimetest.ExternalComplex{TypeMeta: complexv1Meta, String: "bar"}, false}, {"defaulted complex external", oneComplex, true, &runtimetest.ExternalComplex{}, 
&runtimetest.ExternalComplex{TypeMeta: complexv1Meta, String: "bar", Integer64: 5}, false}, {"defaulted complex internal", oneComplex, true, &runtimetest.InternalComplex{}, &runtimetest.InternalComplex{String: "bar", Integer64: 5}, false}, - {"decode unknown obj into unknown", unrecognizedGVK, false, &runtime.Unknown{}, newUnknown(unknownMeta, unrecognizedGVK), false}, - {"decode known obj into unknown", oneComplex, false, &runtime.Unknown{}, newUnknown(complexv1Meta, oneComplex), false}, + {"decode unknown obj into unknown", unrecognizedGVK, false, &runtime.Unknown{}, newUnknown(unknownMeta, bytes.TrimPrefix(unrecognizedGVK, yamlSep)), false}, + {"decode known obj into unknown", oneComplex, false, &runtime.Unknown{}, newUnknown(complexv1Meta, bytes.TrimPrefix(oneComplex, yamlSep)), false}, {"no unknown fields", simpleUnknownField, false, &runtimetest.InternalSimple{}, nil, true}, {"no duplicate fields", simpleDuplicateField, false, &runtimetest.InternalSimple{}, nil, true}, {"no unrecognized API version", unrecognizedVersion, false, &runtimetest.InternalSimple{}, nil, true}, @@ -445,7 +452,7 @@ func TestDecodeInto(t *testing.T) { actual := ourserializer.Decoder( DefaultAtDecode(rt.doDefaulting), - ).DecodeInto(NewYAMLFrameReader(FromBytes(rt.data)), rt.obj) + ).DecodeInto(frame.NewYAMLReader(content.FromBytes(rt.data)), rt.obj) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) } @@ -486,7 +493,7 @@ func TestDecodeAll(t *testing.T) { objs, actual := ourserializer.Decoder( DefaultAtDecode(rt.doDefaulting), DecodeListElements(rt.listSplit), - ).DecodeAll(NewYAMLFrameReader(FromBytes(rt.data))) + ).DecodeAll(frame.NewYAMLReader(content.FromBytes(rt.data))) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) } @@ -519,7 +526,7 @@ func TestDecodeUnknown(t *testing.T) { expected runtime.Object expectedErr bool }{ - 
{"Decode unrecognized kinds into runtime.Unknown", unrecognizedGVK, true, newUnknown(unknownMeta, unrecognizedGVK), false}, + {"Decode unrecognized kinds into runtime.Unknown", unrecognizedGVK, true, newUnknown(unknownMeta, bytes.TrimPrefix(unrecognizedGVK, yamlSep)), false}, {"Decode known kinds into known structs", oneComplex, true, &runtimetest.ExternalComplex{TypeMeta: complexv1Meta, String: "bar"}, false}, {"No support for unrecognized", unrecognizedGVK, false, nil, true}, } @@ -528,7 +535,7 @@ func TestDecodeUnknown(t *testing.T) { t.Run(rt.name, func(t2 *testing.T) { obj, actual := ourserializer.Decoder( DecodeUnknown(rt.unknown), - ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) + ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data))) if (actual != nil) != rt.expectedErr { t2.Errorf("expected error %t but actual %t: %v", rt.expectedErr, actual != nil, actual) } @@ -543,15 +550,15 @@ func TestRoundtrip(t *testing.T) { tests := []struct { name string data []byte - ct ContentType + ct content.ContentType gv *schema.GroupVersion // use a specific groupversion if set. 
if nil, then use the default Encode }{ - {"simple yaml", oneSimple, ContentTypeYAML, nil}, - {"complex yaml", oneComplex, ContentTypeYAML, nil}, - {"simple json", simpleJSON, ContentTypeJSON, nil}, - {"complex json", complexJSON, ContentTypeJSON, nil}, - {"crd with objectmeta & comments", oldCRD, ContentTypeYAML, &ext1gv}, // encode as v1alpha1 - {"unknown object", unrecognizedGVK, ContentTypeYAML, nil}, + {"simple yaml", oneSimple, content.ContentTypeYAML, nil}, + {"complex yaml", oneComplex, content.ContentTypeYAML, nil}, + {"simple json", simpleJSON, content.ContentTypeJSON, nil}, + {"complex json", complexJSON, content.ContentTypeJSON, nil}, + {"crd with objectmeta & comments", oldCRD, content.ContentTypeYAML, &ext1gv}, // encode as v1alpha1 + {"unknown object", unrecognizedGVK, content.ContentTypeYAML, nil}, // TODO: Maybe an unit test (case) for a type with ObjectMeta embedded as a pointer being nil // TODO: Make sure that the Encode call (with comments support) doesn't mutate the object state // i.e. doesn't remove the annotation after use so multiple similar encode calls work. 
@@ -563,16 +570,17 @@ func TestRoundtrip(t *testing.T) { ConvertToHub(true), PreserveCommentsStrict, DecodeUnknown(true), - ).Decode(NewYAMLFrameReader(FromBytes(rt.data))) + ).Decode(frame.NewYAMLReader(content.FromBytes(rt.data))) if err != nil { t2.Errorf("unexpected decode error: %v", err) return } - buf := new(bytes.Buffer) + var buf bytes.Buffer + cw := content.ToBuffer(&buf, content.WithContentType(rt.ct)) if rt.gv == nil { - err = defaultEncoder.Encode(NewFrameWriter(rt.ct, buf), obj) + err = defaultEncoder.Encode(frame.NewRecognizingWriter(cw), obj) } else { - err = defaultEncoder.EncodeForGroupVersion(NewFrameWriter(rt.ct, buf), obj, *rt.gv) + err = defaultEncoder.EncodeForGroupVersion(frame.NewRecognizingWriter(cw), obj, *rt.gv) } actual := buf.Bytes() if err != nil { @@ -684,13 +692,13 @@ testString: bar func TestListRoundtrip(t *testing.T) { objs, err := ourserializer.Decoder( WithCommentsDecode(true), - ).DecodeAll(NewYAMLFrameReader(FromBytes(testList))) + ).DecodeAll(frame.NewYAMLReader(content.FromBytes(testList))) if err != nil { t.Fatal(err) } buf := new(bytes.Buffer) - if err := defaultEncoder.Encode(NewFrameWriter(ContentTypeYAML, buf), objs...); err != nil { + if err := defaultEncoder.Encode(frame.NewWriter(content.ContentTypeYAML, buf), objs...); err != nil { t.Fatal(err) } actual := buf.Bytes() From e71ba147478c2c7c9cfcec0921365ee9d63a1bf6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 27 Jul 2021 14:51:22 +0300 Subject: [PATCH 143/149] Update the storage system to use the framing library --- pkg/storage/backend/backend.go | 9 ++++-- pkg/storage/event/interfaces.go | 1 + .../fileevents/inotify/filewatcher_test.go | 8 ++++- pkg/storage/filesystem/filefinder_simple.go | 8 ++--- pkg/storage/filesystem/format.go | 30 +++++++++---------- pkg/storage/filesystem/storage.go | 4 +-- .../filesystem/unstructured/interfaces.go | 6 ++-- .../filesystem/unstructured/recognizer.go | 3 +- 
.../filesystem/unstructured/storage.go | 17 ++++++----- pkg/storage/interfaces.go | 4 +-- 10 files changed, 51 insertions(+), 39 deletions(-) diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 0021f0f1..8b157aa9 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -6,6 +6,8 @@ import ( "errors" "fmt" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" @@ -184,7 +186,7 @@ func (b *Generic) Get(ctx context.Context, obj Object) error { return err } // Read the underlying bytes - content, err := b.storage.Read(ctx, id) + data, err := b.storage.Read(ctx, id) if err != nil { return err } @@ -195,7 +197,8 @@ func (b *Generic) Get(ctx context.Context, obj Object) error { } // TODO: Check if the decoder "replaces" already-set fields or "leaks" old data? - return b.decoder.DecodeInto(serializer.NewSingleFrameReader(content, ct), obj) + // TODO: Here it'd be great with a frame.FromSingleBytes method + return b.decoder.DecodeInto(frame.NewSingleReader(ct, content.FromBytes(data)), obj) } // ListGroupKinds returns all known GroupKinds by the implementation at that @@ -336,7 +339,7 @@ func (b *Generic) write(ctx context.Context, id core.ObjectID, obj Object) error var objBytes bytes.Buffer // This FrameWriter works for any content type; and transparently writes to objBytes - fw := serializer.NewSingleFrameWriter(&objBytes, ct) + fw := frame.ToSingleBuffer(ct, &objBytes) // The encoder is set to use the given ContentType through fw; and encodes obj. 
if err := b.encoder.EncodeForGroupVersion(fw, obj, gv); err != nil { return err diff --git a/pkg/storage/event/interfaces.go b/pkg/storage/event/interfaces.go index 7d2a1e69..f72a9cf6 100644 --- a/pkg/storage/event/interfaces.go +++ b/pkg/storage/event/interfaces.go @@ -11,6 +11,7 @@ import ( // a possiblility to listen for changes to objects as they change. // TODO: Maybe we could use some of controller-runtime's built-in functionality // for watching for changes? +// TODO: Use k8s.io/apimachinery/pkg/watch#EventType et al instead. type Storage interface { storage.Storage diff --git a/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go b/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go index c423f247..cb1105d9 100644 --- a/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go +++ b/pkg/storage/filesystem/fileevents/inotify/filewatcher_test.go @@ -109,8 +109,14 @@ func (e FileEventTypes) String() string { } func TestEventConcatenation(t *testing.T) { + // TODO: Needs fixing + tmp := t.TempDir() + fw, err := NewFileWatcher(tmp) + if err != nil { + t.Fatal(err) + } for i, e := range testEvents { - result := extractEventTypes((&FileWatcher{}).concatenateEvents(e)) + result := extractEventTypes(fw.(*FileWatcher).concatenateEvents(e)) if !eventsEqual(result, targets[i]) { t.Errorf("wrong concatenation result: %v != %v", result, targets[i]) } diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go index 52d60b20..2cad315e 100644 --- a/pkg/storage/filesystem/filefinder_simple.go +++ b/pkg/storage/filesystem/filefinder_simple.go @@ -7,7 +7,7 @@ import ( "path/filepath" "strings" - "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" "k8s.io/apimachinery/pkg/util/sets" @@ -31,7 +31,7 @@ func NewSimpleFileFinder(fs Filesystem, opts SimpleFileFinderOptions) 
(*SimpleFi if fs == nil { return nil, fmt.Errorf("NewSimpleFileFinder: fs is mandatory") } - ct := serializer.ContentTypeJSON + ct := content.ContentTypeJSON if len(opts.ContentType) != 0 { ct = opts.ContentType } @@ -94,8 +94,8 @@ type SimpleFileFinderOptions struct { DisableGroupDirectory bool // Default: ""; means use file names as the means of storage SubDirectoryFileName string - // Default: serializer.ContentTypeJSON - ContentType serializer.ContentType + // Default: content.ContentTypeJSON + ContentType content.ContentType // Default: DefaultFileExtensionResolver FileExtensionResolver FileExtensionResolver } diff --git a/pkg/storage/filesystem/format.go b/pkg/storage/filesystem/format.go index b36aa1cd..95913500 100644 --- a/pkg/storage/filesystem/format.go +++ b/pkg/storage/filesystem/format.go @@ -6,7 +6,7 @@ import ( "fmt" "path/filepath" - "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/content" ) var ( @@ -21,15 +21,15 @@ type ContentTyper interface { // ContentTypeForPath should return the content type for the file that exists in // the given Filesystem (path is relative). If the content type cannot be determined // please return a wrapped ErrCannotDetermineContentType error. - ContentTypeForPath(ctx context.Context, fs Filesystem, path string) (serializer.ContentType, error) + ContentTypeForPath(ctx context.Context, fs Filesystem, path string) (content.ContentType, error) } // DefaultContentTypes describes the default connection between // file extensions and a content types. 
var DefaultContentTyper ContentTyper = ContentTypeForExtension{ - ".json": serializer.ContentTypeJSON, - ".yaml": serializer.ContentTypeYAML, - ".yml": serializer.ContentTypeYAML, + ".json": content.ContentTypeJSON, + ".yaml": content.ContentTypeYAML, + ".yml": content.ContentTypeYAML, } // ContentTypeForExtension implements the ContentTyper interface @@ -39,12 +39,12 @@ var DefaultContentTyper ContentTyper = ContentTypeForExtension{ // the corresponding content type. There might be many extensions which // map to the same content type, e.g. both ".yaml" -> ContentTypeYAML // and ".yml" -> ContentTypeYAML. -type ContentTypeForExtension map[string]serializer.ContentType +type ContentTypeForExtension map[string]content.ContentType -func (m ContentTypeForExtension) ContentTypeForPath(ctx context.Context, _ Filesystem, path string) (serializer.ContentType, error) { +func (m ContentTypeForExtension) ContentTypeForPath(ctx context.Context, _ Filesystem, path string) (content.ContentType, error) { ct, ok := m[filepath.Ext(path)] if !ok { - return serializer.ContentType(""), fmt.Errorf("%w for file %q", ErrCannotDetermineContentType, path) + return content.ContentType(""), fmt.Errorf("%w for file %q", ErrCannotDetermineContentType, path) } return ct, nil } @@ -52,10 +52,10 @@ func (m ContentTypeForExtension) ContentTypeForPath(ctx context.Context, _ Files // StaticContentTyper always responds with the same, statically-set, ContentType for any path. 
type StaticContentTyper struct { // ContentType is a required field - ContentType serializer.ContentType + ContentType content.ContentType } -func (t StaticContentTyper) ContentTypeForPath(_ context.Context, _ Filesystem, _ string) (serializer.ContentType, error) { +func (t StaticContentTyper) ContentTypeForPath(_ context.Context, _ Filesystem, _ string) (content.ContentType, error) { if len(t.ContentType) == 0 { return "", fmt.Errorf("StaticContentTyper.ContentType must not be empty") } @@ -69,21 +69,21 @@ type FileExtensionResolver interface { // The returned string MUST start with a dot, e.g. ".json". If the given // ContentType is not known, it is recommended to return a wrapped // ErrUnrecognizedContentType. - ExtensionForContentType(ct serializer.ContentType) (string, error) + ExtensionForContentType(ct content.ContentType) (string, error) } // DefaultFileExtensionResolver describes a default connection between // the file extensions and ContentTypes , namely JSON -> ".json" and // YAML -> ".yaml". var DefaultFileExtensionResolver FileExtensionResolver = ExtensionForContentType{ - serializer.ContentTypeJSON: ".json", - serializer.ContentTypeYAML: ".yaml", + content.ContentTypeJSON: ".json", + content.ContentTypeYAML: ".yaml", } // ExtensionForContentType is a simple map implementation of FileExtensionResolver. 
-type ExtensionForContentType map[serializer.ContentType]string +type ExtensionForContentType map[content.ContentType]string -func (m ExtensionForContentType) ExtensionForContentType(ct serializer.ContentType) (string, error) { +func (m ExtensionForContentType) ExtensionForContentType(ct content.ContentType) (string, error) { ext, ok := m[ct] if !ok { return "", fmt.Errorf("%q: %q", ErrUnrecognizedContentType, ct) diff --git a/pkg/storage/filesystem/storage.go b/pkg/storage/filesystem/storage.go index d3a6b4ac..8aeb2fd1 100644 --- a/pkg/storage/filesystem/storage.go +++ b/pkg/storage/filesystem/storage.go @@ -6,7 +6,7 @@ import ( "os" "path/filepath" - "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" "k8s.io/apimachinery/pkg/util/sets" @@ -92,7 +92,7 @@ func (r *Generic) Checksum(ctx context.Context, id core.UnversionedObjectID) (st return checksum, nil } -func (r *Generic) ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) { +func (r *Generic) ContentType(ctx context.Context, id core.UnversionedObjectID) (content.ContentType, error) { // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go index 7d0b64bf..16e6680e 100644 --- a/pkg/storage/filesystem/unstructured/interfaces.go +++ b/pkg/storage/filesystem/unstructured/interfaces.go @@ -3,7 +3,7 @@ package unstructured import ( "context" - "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) @@ -24,7 +24,7 @@ type Storage interface { // ObjectRecognizer returns the underlying ObjectRecognizer used. 
ObjectRecognizer() ObjectRecognizer // FrameReaderFactory returns the underlying FrameReaderFactory used. - FrameReaderFactory() serializer.FrameReaderFactory + FrameReaderFactory() frame.ReaderFactory // PathExcluder specifies what paths to not sync. Can possibly be nil. PathExcluder() filesystem.PathExcluder // UnstructuredFileFinder returns the underlying unstructured.FileFinder used. @@ -35,7 +35,7 @@ type Storage interface { type ObjectRecognizer interface { // RecognizeObjectIDs returns the ObjectIDs present in the file with the given name, // content type and content (in the FrameReader). - RecognizeObjectIDs(fileName string, fr serializer.FrameReader) ([]core.ObjectID, error) + RecognizeObjectIDs(fileName string, fr frame.Reader) ([]core.ObjectID, error) } // FileFinder is an extension to filesystem.FileFinder that allows it to have an internal diff --git a/pkg/storage/filesystem/unstructured/recognizer.go b/pkg/storage/filesystem/unstructured/recognizer.go index 931abac9..de092c5f 100644 --- a/pkg/storage/filesystem/unstructured/recognizer.go +++ b/pkg/storage/filesystem/unstructured/recognizer.go @@ -5,6 +5,7 @@ import ( "fmt" "io" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage/core" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -33,7 +34,7 @@ type KubeObjectRecognizer struct { AllowDuplicates bool } -func (r KubeObjectRecognizer) RecognizeObjectIDs(_ string, fr serializer.FrameReader) ([]core.ObjectID, error) { +func (r KubeObjectRecognizer) RecognizeObjectIDs(_ string, fr frame.Reader) ([]core.ObjectID, error) { if r.Decoder == nil { return nil, errors.New("programmer error: KubeObjectRecognizer.Decoder is nil") } diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go index c61cc94a..730eea83 100644 --- a/pkg/storage/filesystem/unstructured/storage.go +++ b/pkg/storage/filesystem/unstructured/storage.go @@ 
-6,7 +6,8 @@ import ( "fmt" "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) @@ -18,7 +19,7 @@ func NewGeneric( storage filesystem.Storage, recognizer ObjectRecognizer, pathExcluder filesystem.PathExcluder, - framingFactory serializer.FrameReaderFactory, + framingFactory frame.ReaderFactory, ) (Storage, error) { if storage == nil { return nil, fmt.Errorf("storage is mandatory") @@ -28,7 +29,7 @@ func NewGeneric( } // optional: use YAML/JSON by default. if framingFactory == nil { - framingFactory = serializer.NewFrameReaderFactory() + framingFactory = frame.DefaultFactory() } fileFinder, ok := storage.FileFinder().(FileFinder) if !ok { @@ -48,7 +49,7 @@ type Generic struct { recognizer ObjectRecognizer fileFinder FileFinder pathExcluder filesystem.PathExcluder - framingFactory serializer.FrameReaderFactory + framingFactory frame.ReaderFactory } // Sync synchronizes the current state of the filesystem, and overwrites all @@ -124,7 +125,7 @@ func (s *Generic) ObjectRecognizer() ObjectRecognizer { } // FrameReaderFactory returns the underlying FrameReaderFactory used. -func (s *Generic) FrameReaderFactory() serializer.FrameReaderFactory { +func (s *Generic) FrameReaderFactory() frame.ReaderFactory { return s.framingFactory } @@ -146,7 +147,7 @@ func RecognizeIDsInFile( ctx context.Context, fileFinder FileFinder, recognizer ObjectRecognizer, - framingFactory serializer.FrameReaderFactory, + framingFactory frame.ReaderFactory, filePath string, ) (core.UnversionedObjectIDSet, *ChecksumPath, bool, error) { fs := fileFinder.Filesystem() @@ -174,7 +175,7 @@ func RecognizeIDsInFile( // If the file is not known to the FileFinder yet, or if the checksum // was empty, read the file, and recognize it. 
- content, err := fs.ReadFile(ctx, filePath) + fileContent, err := fs.ReadFile(ctx, filePath) if err != nil { return nil, nil, false, fmt.Errorf("Could not read file %q: %v", filePath, err) } @@ -184,7 +185,7 @@ func RecognizeIDsInFile( return nil, nil, false, fmt.Errorf("Could not get content type for file %q: %v", filePath, err) } // Create a new FrameReader for the given ContentType and ReadCloser - fr := framingFactory.NewFrameReader(ct, serializer.FromBytes(content)) + fr := framingFactory.NewReader(ct, content.FromBytes(fileContent)) // Recognize all IDs in the file versionedIDs, err := recognizer.RecognizeObjectIDs(filePath, fr) if err != nil { diff --git a/pkg/storage/interfaces.go b/pkg/storage/interfaces.go index 60e2bcb5..9c026d9d 100644 --- a/pkg/storage/interfaces.go +++ b/pkg/storage/interfaces.go @@ -4,7 +4,7 @@ import ( "context" "errors" - "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/storage/core" "k8s.io/apimachinery/pkg/util/sets" ) @@ -73,7 +73,7 @@ type Reader interface { // the object with the given ID. This operation must function also before the // Object with the given id exists in the system, in order to be able to // create new Objects. 
- ContentType(ctx context.Context, id core.UnversionedObjectID) (serializer.ContentType, error) + ContentType(ctx context.Context, id core.UnversionedObjectID) (content.ContentType, error) // List operations Lister From 72be7a99ca6fa212241f2ea4f403b756a6551f1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Thu, 29 Jul 2021 12:13:23 +0300 Subject: [PATCH 144/149] WIP --- go.mod | 1 + pkg/frame/sanitize/sanitize.go | 18 +- pkg/frame/sanitize/sanitize_test.go | 50 ++++ pkg/storage/client/transactional/client.go | 223 ++++++++---------- pkg/storage/client/transactional/commit.go | 8 +- .../client/transactional/commit/commit.go | 215 +++++++++++++++++ pkg/storage/client/transactional/handlers.go | 16 +- .../client/transactional/interfaces.go | 31 ++- pkg/storage/client/transactional/options.go | 12 + pkg/storage/client/transactional/tx.go | 4 +- pkg/storage/client/transactional/tx_common.go | 11 +- pkg/storage/core/versionref.go | 11 +- 12 files changed, 436 insertions(+), 164 deletions(-) create mode 100644 pkg/storage/client/transactional/commit/commit.go diff --git a/go.mod b/go.mod index 6d518c20..0dd0d7f2 100644 --- a/go.mod +++ b/go.mod @@ -28,6 +28,7 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.0.0-RC2 go.opentelemetry.io/otel/sdk v1.0.0-RC2 go.opentelemetry.io/otel/trace v1.0.0-RC2 + go.uber.org/atomic v1.7.0 go.uber.org/multierr v1.6.0 go.uber.org/zap v1.17.0 golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 diff --git a/pkg/frame/sanitize/sanitize.go b/pkg/frame/sanitize/sanitize.go index d6e42a98..6fbe9792 100644 --- a/pkg/frame/sanitize/sanitize.go +++ b/pkg/frame/sanitize/sanitize.go @@ -71,11 +71,18 @@ type JSONYAMLOption interface { } type jsonYAMLOptions struct { + // Only applicable to JSON at the moment; YAML indentation config not supported Indentation *string // Only applicable to YAML; either yaml.CompactSequenceStyle or yaml.WideSequenceStyle ForceSeqIndentStyle yaml.SequenceIndentStyle - + 
// Only applicable to YAML; JSON doesn't support comments CopyComments *bool + /* + TODO: ForceMapKeyOrder that can either be + - PreserveOrder (if unset) => preserves the order from the prior if given. no-op if no prior. + - Alphabetic => sorts all keys alphabetically + - None => don't preserve order from the prior; no-op + */ } func defaultJSONYAMLOptions() *jsonYAMLOptions { @@ -126,6 +133,13 @@ func (defaultSanitizer) SupportedContentTypes() content.ContentTypes { var ErrTooManyFrames = errors.New("too many frames") +/* +- New policy got applied to all files +- Previously existing policy got applied +*/ + +// TODO: Make sure maps are alphabetically sorted, or match the prior +// Can e.g. use https://github.com/kubernetes-sigs/kustomize/blob/master/kyaml/order/syncorder.go func (s *defaultSanitizer) handleYAML(ctx context.Context, frame []byte) ([]byte, error) { // Get prior data, if any (from the context), that we'll use to copy comments over and // infer the sequence indenting style. @@ -179,6 +193,8 @@ func (s *defaultSanitizer) resolveSeqStyle(frame, priorData []byte, hasPriorData return yaml.SequenceIndentStyle(yaml.DeriveSeqIndentStyle(deriveYAML)) } +// TODO: Maybe use the "Remarshal" property defined here to apply alphabetic order? 
+// https://stackoverflow.com/questions/18668652/how-to-produce-json-with-sorted-keys-in-go func (s *defaultSanitizer) handleJSON(frame []byte) ([]byte, error) { // If it's all whitespace, just return an empty byte array, no actual content here if len(bytes.TrimSpace(frame)) == 0 { diff --git a/pkg/frame/sanitize/sanitize_test.go b/pkg/frame/sanitize/sanitize_test.go index cb8682a3..679629ea 100644 --- a/pkg/frame/sanitize/sanitize_test.go +++ b/pkg/frame/sanitize/sanitize_test.go @@ -313,6 +313,56 @@ items: # after - item3 +`, + }, + { + name: "copy comments; mappingnode keys are now alphabetically sorted", + ct: content.ContentTypeYAML, + opts: []JSONYAMLOption{}, + prior: `# root +# hello + +items: +# ignoreme + - item1 # hello + # bla + - item2 # hi + # after +kind: List # foo +# bla +apiVersion: v1 +notexist: foo # remember me! + +`, + frame: `--- +apiVersion: v1 +fruits: +- fruit1 +kind: List +items: +- item1 +- item2 +- item3 + +`, + want: `# root +# hello +# Comments lost during file manipulation: +# Field "notexist": "remember me!" 
+ +# bla +apiVersion: v1 +fruits: + - fruit1 +items: + # ignoreme + - item1 # hello + # bla + - item2 # hi + # after + + - item3 +kind: List # foo `, }, { diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index bd29db97..f5a93e7b 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -5,17 +5,18 @@ import ( "errors" "fmt" "strings" - "sync/atomic" + "sync" "github.com/sirupsen/logrus" "github.com/weaveworks/libgitops/pkg/storage/backend" "github.com/weaveworks/libgitops/pkg/storage/client" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional/commit" "github.com/weaveworks/libgitops/pkg/storage/core" - syncutil "github.com/weaveworks/libgitops/pkg/util/sync" + "go.uber.org/atomic" utilerrs "k8s.io/apimachinery/pkg/util/errors" ) -var _ Client = &Generic{} +var _ Client = &genericWithRef{} func NewGeneric(c client.Client, manager TransactionManager) (Client, error) { if c == nil { @@ -24,25 +25,22 @@ func NewGeneric(c client.Client, manager TransactionManager) (Client, error) { if manager == nil { return nil, fmt.Errorf("%w: manager is required", core.ErrInvalidParameter) } - g := &Generic{ - c: c, - lockMap: syncutil.NewNamedLockMap(), + g := &generic{ + c: c, + //lockMap: syncutil.NewNamedLockMap(), txHooks: &MultiTransactionHook{}, commitHooks: &MultiCommitHook{}, manager: manager, - //merger: merger, + txs: make(map[string]*atomic.Bool), + txsMu: &sync.Mutex{}, } - // We must be able to resolve versions - if g.versionRefResolver() == nil { - return nil, fmt.Errorf("%w: the underlying Client must provide a VersionRefResolver through its Storage", core.ErrInvalidParameter) - } - return g, nil + return &genericWithRef{g, commit.Default()}, nil } -type Generic struct { +type generic struct { c client.Client - lockMap syncutil.NamedLockMap + //lockMap syncutil.NamedLockMap // Hooks txHooks TransactionHookChain @@ -50,8 +48,27 @@ type Generic struct { 
// +required manager TransactionManager + + txs map[string]*atomic.Bool + txsMu *sync.Mutex +} + +type genericWithRef struct { + *generic + ref commit.Ref +} + +func (c *genericWithRef) AtRef(ref commit.Ref) Client { + return &genericWithRef{c.generic, ref} +} +func (c *genericWithRef) AtSymbolicRef(symbolic string) Client { + return c.AtRef(commit.At(symbolic)) +} +func (c *genericWithRef) CurrentRef() commit.Ref { + return c.ref } +/* type txLockKeyImpl struct{} var txLockKey = txLockKeyImpl{} @@ -61,54 +78,56 @@ type txLock struct { //mode TxMode // active == 1 means "transaction active, mu is locked for writing" // active == 0 means "transaction has stopped, mu has been unlocked" - active uint32 -} + //active uint32 + active *atomic.Bool +}*/ -func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error { - return c.lockAndRead(ctx, func() error { +func (c *genericWithRef) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error { + return c.lockAndRead(ctx, func(ctx context.Context) error { return c.c.Get(ctx, key, obj) }) } -func (c *Generic) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - return c.lockAndRead(ctx, func() error { +func (c *genericWithRef) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + return c.lockAndRead(ctx, func(ctx context.Context) error { return c.c.List(ctx, list, opts...) 
}) } -func (c *Generic) versionRefResolver() core.VersionRefResolver { - return c.c.BackendReader().Storage().VersionRefResolver() -} - -func (c *Generic) lockForBranch(branch string) (syncutil.LockWithData, *txLock, bool) { +/*func (c *genericWithRef) lockForBranch(branch string) (syncutil.LockWithData, *txLock, bool) { lck := c.lockMap.LockByName(branch) txState, ok := lck.QLoad(txLockKey).(*txLock) return lck, txState, ok -} - -func (c *Generic) lockAndRead(ctx context.Context, callback func() error) error { - ref := core.GetVersionRef(ctx) +}*/ - _, immutable, err := c.versionRefResolver().ResolveVersionRef(ref) +func (c *genericWithRef) lockAndRead(ctx context.Context, callback func(ctx context.Context) error) error { + h, err := c.ref.Resolve(c.manager.RefResolver()) if err != nil { return err - } else if immutable { - // If this is an immutable revision, just continue the call - return callback() } - // At this point we know that ref is mutable (what we call a "branch" here), and commit is the fixed revision - lck := c.lockMap.LockByName(ref) - lck.Lock() - defer lck.Unlock() // TODO: At what point should we resolve the "branch" -> "commit" part? Should we expect that to be done in the // filesystem only? - return callback() + return callback(commit.WithHash(ctx, h)) } -func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txFunc) { +func (c *genericWithRef) txStateByName(name string) *atomic.Bool { + // c.txsMu guards reads and writes of the c.txs map + c.txsMu.Lock() + defer c.txsMu.Unlock() + + // Check if information about a transaction on this branch exists. 
+ state, ok := c.txs[name] + if ok { + return state + } + // if not, grow the txs map by one and return it + c.txs[name] = atomic.NewBool(false) + return c.txs[name] +} +func (c *genericWithRef) initTx(ctx context.Context, info TxInfo) (context.Context, txFunc, error) { // Get the head branch lock and status - lck := c.lockMap.LockByName(info.HeadBranch) + //lck := c.lockMap.LockByName(info.HeadBranch) // Wait for all reads to complete (in the case of the atomic more), // and then lock for writing. For non-atomic mode this uses the mutex @@ -119,12 +138,22 @@ func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txF // regardless of mode. If atomic mode is enabled, this also waits // on any reads happening at this moment. For all modes, this ensures // transactions happen in order. - lck.Lock() + /*lck.Lock() txState := &txLock{ active: 1, // set tx state to "active" //mode: info.Options.Mode, // declare what transaction mode is used } - lck.Store(txLockKey, txState) + lck.Store(txLockKey, txState)*/ + + active := c.txStateByName(info.HeadBranch) + // If active == false, then this will switch active => true and return true + // If active == true, then no operation will take place, and false is returned + // In other words, if false is returned, a transaction is ongoing and we should + // return a temporal error + if !active.CAS(false, true) { + // TODO: Is this the right way? 
+ return nil, nil, errors.New("transaction is already ongoing") + } // Create a child context with a timeout dlCtx, cleanupTimeout := context.WithTimeout(ctx, info.Options.Timeout) @@ -136,7 +165,7 @@ func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txF return fmt.Errorf("Failed to cleanup branch %s after tx: %v", info.HeadBranch, err) } // Unlock the mutex so new transactions can take place on this branch - lck.Unlock() + //lck.Unlock() return nil } @@ -146,7 +175,7 @@ func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txF <-dlCtx.Done() // This guard makes sure the cleanup function runs exactly // once, regardless of transaction end cause. - if atomic.CompareAndSwapUint32(&txState.active, 1, 0) { + if active.CAS(true, false) { if err := cleanupFunc(); err != nil { logrus.Errorf("Failed to cleanup after tx timeout: %v", err) } @@ -161,7 +190,7 @@ func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txF // function and the above function race to set active => 0 // Regardless, due to the atomic nature of the operation, // cleanupFunc() will only be run once. 
- if atomic.CompareAndSwapUint32(&txState.active, 1, 0) { + if active.CAS(true, false) { // We can now stop the timeout timer cleanupTimeout() // Clean up the transaction @@ -170,51 +199,37 @@ func (c *Generic) initTx(ctx context.Context, info TxInfo) (context.Context, txF return nil } - return dlCtx, abortFunc + return dlCtx, abortFunc, nil } -func (c *Generic) cleanupAfterTx(ctx context.Context, info *TxInfo) error { - // Always both clean the branch, and run post-tx tasks +func (c *genericWithRef) cleanupAfterTx(ctx context.Context, info *TxInfo) error { + // Always both clean the writable area, and run post-tx tasks return utilerrs.NewAggregate([]error{ - // TODO: This should be "clean up the writable area" - c.manager.ResetToCleanVersion(ctx, info.Base), + c.manager.Abort(ctx, info), // TODO: should this be in its own goroutine to switch back to main // ASAP? c.TransactionHookChain().PostTransactionHook(ctx, *info), }) } -func (c *Generic) BackendReader() backend.Reader { +func (c *genericWithRef) BackendReader() backend.Reader { return c.c.BackendReader() } -/*func (c *Generic) BranchMerger() BranchMerger { - return c.merger -}*/ - -func (c *Generic) TransactionManager() TransactionManager { +func (c *genericWithRef) TransactionManager() TransactionManager { return c.manager } -func (c *Generic) TransactionHookChain() TransactionHookChain { +func (c *genericWithRef) TransactionHookChain() TransactionHookChain { return c.txHooks } -func (c *Generic) CommitHookChain() CommitHookChain { +func (c *genericWithRef) CommitHookChain() CommitHookChain { return c.commitHooks } -func (c *Generic) Transaction(ctx context.Context, opts ...TxOption) Tx { - tx, err := c.transaction(ctx, opts...) - if err != nil { - // TODO: Return a Tx with an error included - panic(err) - } - return tx -} - -func (c *Generic) BranchTransaction(ctx context.Context, headBranch string, opts ...TxOption) Tx { - tx, err := c.branchTransaction(ctx, headBranch, opts...) 
+func (c *genericWithRef) Transaction(ctx context.Context, headBranch string, opts ...TxOption) Tx { + tx, err := c.transaction(ctx, headBranch, opts...) if err != nil { // TODO: Return a Tx with an error included panic(err) @@ -224,51 +239,12 @@ func (c *Generic) BranchTransaction(ctx context.Context, headBranch string, opts var ErrVersionRefIsImmutable = errors.New("cannot execute transaction against immutable version ref") -func (c *Generic) transaction(ctx context.Context, opts ...TxOption) (Tx, error) { - // Rules: A transaction executes against "itself". - - // Parse options - o := defaultTxOptions().ApplyOptions(opts) - - ref := core.GetVersionRef(ctx) - - baseCommit, isImmutable, err := c.versionRefResolver().ResolveVersionRef(ref) +func (c *genericWithRef) transaction(ctx context.Context, headBranch string, opts ...TxOption) (Tx, error) { + // Get the immutable base version hash + baseHash, err := c.ref.Resolve(c.manager.RefResolver()) if err != nil { return nil, err } - // We cannot apply a transaction against an immutable version - if isImmutable { - return nil, fmt.Errorf("%w: %s", ErrVersionRefIsImmutable, ref) - } - - info := TxInfo{ - BaseCommit: baseCommit, - HeadBranch: ref, - Options: *o, - } - // Initialize the transaction - ctxWithDeadline, cleanupFunc := c.initTx(ctx, info) - - // Run pre-tx checks - if err := c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info); err != nil { - return nil, err - } - - return &txImpl{ - &txCommon{ - c: c.c, - manager: c.manager, - commitHook: c.CommitHookChain(), - ctx: ctxWithDeadline, - info: info, - cleanupFunc: cleanupFunc, - }, - }, nil -} - -func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts ...TxOption) (Tx, error) { - // Get the base version reference. It is ok if it's immutable, too. 
- baseRef := core.GetVersionRef(ctx) // Append random bytes to the end of the head branch if it ends with a dash if strings.HasSuffix(headBranch, "-") { @@ -279,38 +255,31 @@ func (c *Generic) branchTransaction(ctx context.Context, headBranch string, opts headBranch += suffix } - // Validate that the base and head branches are distinct - if baseRef == headBranch { - return nil, fmt.Errorf("head and target branches must not be the same") - } - - logrus.Debugf("Base VersionRef: %q. Head branch: %q.", baseRef, headBranch) + logrus.Debugf("Base commit hash: %q. Head branch: %q.", baseHash, headBranch) // Parse options o := defaultTxOptions().ApplyOptions(opts) - // Resolve what the base commit is - baseCommit, _, err := c.versionRefResolver().ResolveVersionRef(baseRef) - if err != nil { - return nil, err - } - info := TxInfo{ - BaseCommit: baseCommit, + BaseCommit: baseHash, HeadBranch: headBranch, Options: *o, } // Register the head branch with the context // TODO: We should register all of TxInfo here instead, or ...? - ctxWithHeadBranch := core.WithVersionRef(ctx, headBranch) + ctxWithHeadBranch := commit.WithMutable(ctx, commit.NewMutable(headBranch)) // Initialize the transaction - ctxWithDeadline, cleanupFunc := c.initTx(ctxWithHeadBranch, info) + ctxWithDeadline, cleanupFunc, err := c.initTx(ctxWithHeadBranch, info) + if err != nil { + return nil, err + } // Run pre-tx checks and create the new branch + // TODO: Use multierr? 
if err := utilerrs.NewAggregate([]error{ c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info), - c.manager.CreateBranch(ctxWithDeadline, headBranch), + c.manager.Init(ctxWithDeadline, &info), }); err != nil { return nil, err } diff --git a/pkg/storage/client/transactional/commit.go b/pkg/storage/client/transactional/commit.go index eeb5e9fa..a46cbccb 100644 --- a/pkg/storage/client/transactional/commit.go +++ b/pkg/storage/client/transactional/commit.go @@ -1,11 +1,6 @@ package transactional -import ( - "fmt" - - "github.com/fluxcd/go-git-providers/validation" -) - +/* // Commit describes a result of a transaction. type Commit interface { // GetAuthor describes the author of this commit. @@ -124,3 +119,4 @@ func (r GenericCommitMessage) String() string { } return r.Title } +*/ diff --git a/pkg/storage/client/transactional/commit/commit.go b/pkg/storage/client/transactional/commit/commit.go new file mode 100644 index 00000000..0cc32a73 --- /dev/null +++ b/pkg/storage/client/transactional/commit/commit.go @@ -0,0 +1,215 @@ +package commit + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/uuid" +) + +type Commit interface { + Hash() Hash + Author() Signature + Message() Message + Parents() []Hash +} + +type Request interface { + Author() Signature + Message() Message + Validate() error +} + +type Signature interface { + // Name describes the author's name (e.g. as per git config) + // +required + Name() string + // Email describes the author's email (e.g. as per git config). + // It is optional generally, but might be required by some specific + // implementations. + // +optional + Email() string + // When is the timestamp of the signature. + // +optional + When() *time.Time + // The String() method must return a (ideally both human- and machine- + // readable) concatenated string including the name and email (if + // applicable) of the author. 
+ fmt.Stringer +} + +type Message interface { + // Title describes the change concisely, so it can be used e.g. as + // a commit message or PR title. Certain implementations might enforce + // character limits on this string. + // +required + Title() string + // Description contains optional extra, more detailed information + // about the change. + // +optional + Description() string + // The String() method must return a (ideally both human- and machine- + // readable) concatenated string including the title and description + // (if applicable) of the author. + fmt.Stringer +} + +type Hash interface { + Hash() []byte + String() string +} + +func WithHash(ctx context.Context, h Hash) context.Context { + if h == nil { + return ctx + } + return context.WithValue(ctx, hashCtxKey, h) +} + +func GetHash(ctx context.Context) Hash { + return ctx.Value(hashCtxKey).(Hash) +} + +type hashCtxKeyStruct struct{} + +var hashCtxKey = hashCtxKeyStruct{} + +type Ref interface { + Resolve(RefResolver) (Hash, error) +} + +type RefResolver interface { + ResolveSymbolic(SymbolicRef) (Hash, error) +} + +type Resolver interface { + ResolveHash(Hash) (Commit, error) +} + +func SHA1(h [20]byte) Hash { + b := make([]byte, 20) + copy(b, h[:]) + return &hash{hash: b, encoded: hex.EncodeToString(b)} +} + +func FromSHA1(hash string) Ref { + return &sha1Ref{ref: hash} +} + +func At(symbolic string) SymbolicRef { + return &symbolicRef{SymbolicTypeUnknown, symbolic, 0} +} + +func Default() SymbolicRef { + return AtBranch("") // Signifies the default branch +} + +func AtBranch(b string) SymbolicRef { + return Before(b, 0) +} + +func Before(b string, n uint8) SymbolicRef { + return &symbolicRef{SymbolicTypeBranch, b, n} +} + +func AtTag(t string) SymbolicRef { + return &symbolicRef{SymbolicTypeTag, t, 0} +} + +func AtHash(h string) SymbolicRef { + return &symbolicRef{SymbolicTypeHash, h, 0} +} + +type SymbolicType int + +const ( + SymbolicTypeUnknown SymbolicType = iota + SymbolicTypeHash + // A 
branch is generally a mutable + SymbolicTypeBranch + SymbolicTypeTag +) + +type SymbolicRef interface { + Ref + + String() string + Index() uint8 + Type() SymbolicType +} + +type hash struct { + hash []byte + encoded string +} + +func (h *hash) Hash() []byte { return h.hash } +func (h *hash) String() string { return h.encoded } + +type sha1Ref struct { + ref string +} + +func (r *sha1Ref) Resolve(RefResolver) (Hash, error) { + b, err := hex.DecodeString(r.ref) + if err != nil { + return nil, err + } + return &hash{hash: b, encoded: r.ref}, nil +} + +type symbolicRef struct { + st SymbolicType + ref string + index uint8 +} + +func (r *symbolicRef) String() string { return r.ref } +func (r *symbolicRef) Index() uint8 { return r.index } +func (r *symbolicRef) Type() SymbolicType { return r.st } +func (r *symbolicRef) Resolve(res RefResolver) (Hash, error) { + // This is probably resolver-specific + if r.index != 0 && r.st != SymbolicTypeUnknown && r.st != SymbolicTypeBranch { + return nil, errors.New("index only works for branches") + } + return res.ResolveSymbolic(r) +} + +type MutableTarget interface { + HeadBranch() string + BaseCommit() Hash + UUID() types.UID +} + +func NewMutableTarget(headBranch string, baseCommit Hash) MutableTarget { + return &mutableTarget{headBranch: headBranch, baseCommit: baseCommit, uuid: uuid.New()} +} + +type mutableTarget struct { + headBranch string + baseCommit Hash + uuid types.UID +} + +func (m *mutableTarget) HeadBranch() string { return m.headBranch } +func (m *mutableTarget) BaseCommit() Hash { return m.baseCommit } +func (m *mutableTarget) UUID() types.UID { return m.uuid } + +func WithMutableTarget(ctx context.Context, m MutableTarget) context.Context { + if m == nil { + return ctx + } + return context.WithValue(ctx, mutableCtxKey, m) +} + +func GetMutableTarget(ctx context.Context) MutableTarget { + return ctx.Value(mutableCtxKey).(MutableTarget) +} + +type mutableCtxKeyStruct struct{} + +var mutableCtxKey = 
mutableCtxKeyStruct{} diff --git a/pkg/storage/client/transactional/handlers.go b/pkg/storage/client/transactional/handlers.go index 6d4bb6ba..90cd3a55 100644 --- a/pkg/storage/client/transactional/handlers.go +++ b/pkg/storage/client/transactional/handlers.go @@ -3,11 +3,11 @@ package transactional import ( "context" - "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional/commit" ) type TxInfo struct { - BaseCommit core.Commit + BaseCommit commit.Hash HeadBranch string Options TxOptions } @@ -25,13 +25,13 @@ type CommitHookChain interface { type CommitHook interface { // PreCommitHook executes arbitrary logic for the given transaction info // and commit info; if an error is returned, the commit won't happen. - PreCommitHook(ctx context.Context, commit Commit, info TxInfo) error + PreCommitHook(ctx context.Context, req commit.Request, info TxInfo) error // PostCommitHook executes arbitrary logic for the given transaction info // and commit info; if an error is returned, the commit will happen in the // case of a BranchTx on the head branch; but the transaction itself will // fail. In the case of a "normal" transaction; the commit will be made, // but later rolled back. 
- PostCommitHook(ctx context.Context, commit Commit, info TxInfo) error + PostCommitHook(ctx context.Context, req commit.Request, info TxInfo) error } var _ CommitHookChain = &MultiCommitHook{} @@ -45,24 +45,24 @@ func (m *MultiCommitHook) Register(h CommitHook) { m.CommitHooks = append(m.CommitHooks, h) } -func (m *MultiCommitHook) PreCommitHook(ctx context.Context, commit Commit, info TxInfo) error { +func (m *MultiCommitHook) PreCommitHook(ctx context.Context, req commit.Request, info TxInfo) error { for _, ch := range m.CommitHooks { if ch == nil { continue } - if err := ch.PreCommitHook(ctx, commit, info); err != nil { + if err := ch.PreCommitHook(ctx, req, info); err != nil { return err } } return nil } -func (m *MultiCommitHook) PostCommitHook(ctx context.Context, commit Commit, info TxInfo) error { +func (m *MultiCommitHook) PostCommitHook(ctx context.Context, req commit.Request, info TxInfo) error { for _, ch := range m.CommitHooks { if ch == nil { continue } - if err := ch.PostCommitHook(ctx, commit, info); err != nil { + if err := ch.PostCommitHook(ctx, req, info); err != nil { return err } } diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go index 52b6c7eb..79b0cac5 100644 --- a/pkg/storage/client/transactional/interfaces.go +++ b/pkg/storage/client/transactional/interfaces.go @@ -4,12 +4,17 @@ import ( "context" "github.com/weaveworks/libgitops/pkg/storage/client" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional/commit" "github.com/weaveworks/libgitops/pkg/storage/core" ) type Client interface { client.Reader + AtRef(commit.Ref) Client + AtSymbolicRef(string) Client + CurrentRef() commit.Ref + TransactionManager() TransactionManager // KeyedLock is used for locking operations targeting branches //KeyedLock() syncutil.NamedLockMap @@ -24,22 +29,33 @@ type Client interface { // Transaction creates a new transaction on the branch stored in the context, so that // no other writes 
to that branch can take place meanwhile. - Transaction(ctx context.Context, opts ...TxOption) Tx - // BranchTransaction creates a new "head" branch with the given {branchName} name, based + //Transaction(ctx context.Context, opts ...TxOption) Tx + + // Transaction creates a new "head" branch (if branchName) with the given {branchName} name, based // on the "base" branch in the context. The "base" branch is not locked for writing while // the transaction is running, but the head branch is. - BranchTransaction(ctx context.Context, branchName string, opts ...TxOption) Tx + Transaction(ctx context.Context, branchName string, opts ...TxOption) Tx } type TransactionManager interface { + // Init is run at the beginning of the transaction + Init(ctx context.Context, tx *TxInfo) error + + // Commit creates a new commit for the given branch. + // + Commit(ctx context.Context, tx *TxInfo, req commit.Request) error + + Abort(ctx context.Context, tx *TxInfo) error + + RefResolver() commit.RefResolver + CommitResolver() commit.Resolver + // CreateBranch creates a new branch with the given target branch name. It forks out // of the branch specified in the context. - CreateBranch(ctx context.Context, branch string) error + //CreateBranch(ctx context.Context, branch string) error // ResetToCleanVersion switches back to the given branch; but first discards all non-committed // changes. //ResetToCleanVersion(ctx context.Context, ref core.VersionRef) error - // Commit creates a new commit for the branch stored in the context. - Commit(ctx context.Context, commit Commit) error /*// LockVersionRef takes the VersionRef attached in the context, and makes sure that it is // "locked" to the current commit for a given branch. 
@@ -53,11 +69,12 @@ type TransactionManager interface { type CustomTxFunc func(ctx context.Context) error type Tx interface { - Commit(Commit) error + Commit(req commit.Request) error Abort(err error) error Client() client.Client + // TODO: Rename to Do/Run/Execute Custom(CustomTxFunc) Tx Get(key core.ObjectKey, obj client.Object) Tx diff --git a/pkg/storage/client/transactional/options.go b/pkg/storage/client/transactional/options.go index 5450d1e1..76109e35 100644 --- a/pkg/storage/client/transactional/options.go +++ b/pkg/storage/client/transactional/options.go @@ -16,7 +16,19 @@ func defaultTxOptions() *TxOptions { } type TxOptions struct { + // Timeout is the maximum time one run of the transaction can take. Timeout time.Duration + // Retry is by default 0, which means "no retries". If it's specified to be + // negative, retries (with backoff) are infinite. If the function specified is + // non-re-entrant, use a retry of only 0. + Retry *int32 + + // Success scenario for git would be if --ff-only succeeds cleanly. 
+ // Git always tries an --ff-only git push in the beginning, then optionally + // tries some merge strategy, and then finally retries (return signature should + // be (error, bool) where the bool specifies whether to keep retrying or not) + // Git-recognized strategies are: AutoMerge (which is what "git pull" does by default) + MergeStrategy string //Mode TxMode } diff --git a/pkg/storage/client/transactional/tx.go b/pkg/storage/client/transactional/tx.go index 30c6b6cd..5b13d2e3 100644 --- a/pkg/storage/client/transactional/tx.go +++ b/pkg/storage/client/transactional/tx.go @@ -1,10 +1,12 @@ package transactional +import "github.com/weaveworks/libgitops/pkg/storage/client/transactional/commit" + type txImpl struct { *txCommon } -func (tx *txImpl) Commit(c Commit) error { +func (tx *txImpl) Commit(c commit.Request) error { // Run the operations, and try to create the commit if err := tx.tryApplyAndCommitOperations(c); err != nil { // If we failed with the transaction, abort directly diff --git a/pkg/storage/client/transactional/tx_common.go b/pkg/storage/client/transactional/tx_common.go index 0229f24d..42557a80 100644 --- a/pkg/storage/client/transactional/tx_common.go +++ b/pkg/storage/client/transactional/tx_common.go @@ -4,6 +4,7 @@ import ( "context" "github.com/weaveworks/libgitops/pkg/storage/client" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional/commit" utilerrs "k8s.io/apimachinery/pkg/util/errors" ) @@ -32,25 +33,25 @@ func (tx *txCommon) Abort(err error) error { }) } -func (tx *txCommon) handlePreCommit(c Commit) txFunc { +func (tx *txCommon) handlePreCommit(c commit.Request) txFunc { return func() error { return tx.commitHook.PreCommitHook(tx.ctx, c, tx.info) } } -func (tx *txCommon) commit(c Commit) txFunc { +func (tx *txCommon) commit(c commit.Request) txFunc { return func() error { - return tx.manager.Commit(tx.ctx, c) + return tx.manager.Commit(tx.ctx, &tx.info, c) } } -func (tx *txCommon) handlePostCommit(c Commit) txFunc { 
+func (tx *txCommon) handlePostCommit(c commit.Request) txFunc { return func() error { return tx.commitHook.PostCommitHook(tx.ctx, c, tx.info) } } -func (tx *txCommon) tryApplyAndCommitOperations(c Commit) error { +func (tx *txCommon) tryApplyAndCommitOperations(c commit.Request) error { // If an error occurred already before, just return it directly if tx.err != nil { return tx.err diff --git a/pkg/storage/core/versionref.go b/pkg/storage/core/versionref.go index fe8591c4..f5728a07 100644 --- a/pkg/storage/core/versionref.go +++ b/pkg/storage/core/versionref.go @@ -1,9 +1,6 @@ package core -import ( - "context" -) - +/* type VersionRefResolver interface { //IsImmutable(ref string) (bool, error) // Turns a branch name into a commit hash. If ref already is an existing commit, this is a no-op. @@ -12,10 +9,6 @@ type VersionRefResolver interface { type Commit string -/*type VersionRef2 string - - */ - var versionRefKey = versionRefKeyImpl{} type versionRefKeyImpl struct{} @@ -38,7 +31,7 @@ func GetVersionRef(ctx context.Context) string { return "" } return r -} +}*/ /* // NewMutableVersionRef creates a new VersionRef for a given branch. It is From 54aead60f318048da6228b30a6527454ad5b29f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 2 Aug 2021 20:52:44 +0300 Subject: [PATCH 145/149] Check in progress on framing and sanitation documentation. --- framing.md | 318 ++++++++++++++++++++++++++++++++++++++++++++++++++ sanitation.md | 137 ++++++++++++++++++++++ 2 files changed, 455 insertions(+) create mode 100644 framing.md create mode 100644 sanitation.md diff --git a/framing.md b/framing.md new file mode 100644 index 00000000..27862399 --- /dev/null +++ b/framing.md @@ -0,0 +1,318 @@ +# Framing + +A frame is serialized bytes representing exactly one decodable object, into a Go struct. 
+ +The framing package lives in `github.com/weaveworks/libgitops/pkg/frame`, providing YAML and JSON framing by default, but is extensible to other content types as well. + +A valid frame should not contain any frame separators (e.g. `---` for YAML), and must not be empty. A frame (and `frame.Reader` or `frame.Writer`) is content-type specific, where the content type is e.g. YAML or JSON. + +The source/destination byte stream that is being "framed" by a `frame.Reader` or `frame.Writer` can be for example a file, `/dev/std{in,out,err}`, an HTTP request, or some Go `string`/`[]byte`, for example. + +> Note that “frames” and “framer” terminology was borrowed from [`k8s.io/apimachinery`](TODO). Frame maps to the YAML 1.2 spec definition of “documents”, as per below. + +## Goals + +TODO + +## Noteworthy interfaces + +TODO + +## Default implementations + +- `frame.DefaultFactory()` gives you a combined `frame.ReaderFactory` and `frame.WriterFactory` that supports JSON and YAML. + +## Examples + +### YAML vs JSON frames + +This YAML stream contains two frames, i.e. 2 [YAML documents](https://yaml.org/spec/1.2/spec.html#id2800132): + +```yaml +--- +# Frame 1 +foo: bar +bla: true +--- +# Frame 2 +bar: 123 +--- +``` + +The similar list of frames in JSON would be represented as follows: + +```json +{ + "foo": "bar", + "bla": true +} +{ + "bar": 123 +} +``` + +An interesting observation about JSON is that it's "self-framing". The JSON decoder in Go can figure out where an object starts and ends, hence there's no need for extra frame separators, like in YAML. 
+ +### Matching a Go struct + +"Decodable into a Go struct" means that for the example above, the first frame returned by a framer is: + +```yaml +# Frame 1 +foo: bar +bla: true +``` + +```yaml +# Frame 2 +bar: 123 +``` + +And this serialized content matches the following Go structs: + +```go +type T1 struct { + Foo string `json:"foo"` + Bla bool `json:"bla"` +} + +type T2 struct { + Bar int64 `json:"bar"` +} +``` + +Now, you might ask yourself, that if you look at a generic frame returned from the example above, how do you figure out whether a generic frame should be decoded into `T1` or `T2`, or any other type? + +One quick idea would be to annotate the serialized byte representation with some metadata about what content the frame describes. For example, there could be a `kind` field specifying `T1` and `T2` above, respectively. + +This is one of the reasons why Kubernetes has [Group, Version and Kinds](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#types-kinds). + +But if there's only a `kind` field, it'd be very easy to create naming conflicts, if the whole software ecosystem must agree on or allocate their `kind`s. + +For example: There could be `kind: Cluster`, but without any logical grouping, you wouldn't know if it's an etcd, MySQL or Kubernetes cluster that is being referred to. + +This is why there exists `group`s in Kubernetes as well. The `apiVersion` field of most Kubernetes-like objects is actually of form: `group/version`. (With exception to `apiVersion: v1` which has `group == ""` (also known as `core`) and `version=="v1"`) + +Shortly, the `group` serves as a virtual "namespace" of what the `kind` refers to. `version` specifies the schema of the given object. `version` is very important to allow your schema evolve over time. 
+ +For example, imagine some kind of distributed database with the following initial schema + +```yaml +apiVersion: my-replicated-db.com/v1alpha1 +kind: Database +spec: + isReplicated: true # A simple boolean telling that the database should be replicated +``` + +(by convention, versioning starts from `v1alpha1`, that is, "the first alpha release of the first schema version") + +Over time, you realize that you actually need to specify _how_ many replicas there should be, so you release `v1alpha2` ("the second alpha release of the first schema version"): + +```yaml +apiVersion: my-replicated-db.com/v1alpha2 +kind: Database +spec: + replicas: 3 # A how many replicas should the database use? +``` + +Later, you realize that there is a need to distinguish between read and write replicas, hence you change the schema once again. But as you feel confident in this design, you upgrade the schema to `v1beta1` ("the first beta release of the first schema version"): + +```yaml +apiVersion: my-replicated-db.com/v1beta1 +kind: Database +spec: + replicas: # A how many read/write replicas should the database use? + read: 3 + write: 1 +``` + +Thanks to specifying the `version` as well, your application can support decoding all three different versions of the objects, as long as you include the corresponding Go structs for all three versions in your Go code. + +For now, we don't need to dive into how exactly to decode the frames, but it's important to notice that each frame probably should, for this reason, specify `apiVersion` and `kind`. With this, the example would look like: + +```yaml +# Frame 1 +apiVersion: foo.com/v1 +kind: T1 +foo: bar +bla: true +``` + +```yaml +# Frame 2 +apiVersion: foo.com/v1 +kind: T2 +bar: 123 +``` + +> Note: The struct name and the `kind` necessarily don't need to match, but this is by convention the far most popular way to do it. 
+ +### Empty Frames + +Empty frames must be ignored, because they are not decodable; they don't map to exactly one Go struct. + +To illustrate, the following YAML file contains 2 frames: + +```yaml + +--- + +--- + +# Frame 1 +apiVersion: foo.com/v1 +kind: T1 +foo: bar +bla: true + +--- + + +--- + +# Frame 2 +apiVersion: foo.com/v1 +kind: T2 +bar: 123 + +--- +``` + +TODO: Investigate what happens (or should happen) if there's only comments in a frame. One thing that could be caught in the sanitation process is if the top-level document doesn't any children. However, shall we support retaining that comment-only frame? + +### Lists + +As per the [Kubernetes API conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#types-kinds), there are "special" kinds with the `List` suffix that contain multiple objects _within the same frame_. + +These lists are useful in the REST communication between `kubectl` and the API server, for example. If you want to get a set of same-kind items from the API server, you'd invoke an HTTP request along the lines of: + +```http +GET /api/v1/namespaces/default/services +``` + +and get a response of the form: + +```json +{ + "kind": "ServiceList", + "apiVersion": "v1", + "metadata": { + "resourceVersion": "606" + }, + "items": [ + { + "metadata": { + "name": "kubernetes", + "namespace": "default", + "labels": { + "component": "apiserver", + "provider": "kubernetes" + } + }, + "spec": { + "clusterIP": "10.96.0.1", + }, + "status": {} + } + ] +} +``` + +(this can be tested with `kubectl get --raw=/api/v1/namespaces/default/services | jq .`) + +Why bother returning a `kind: ServiceList` instead of a set of `kind: Service`, separated as JSON frames demonstrated above? + +The answer is: a need for returning metadata about the response itself. For example, we can see here that `.metadata.resourceVersion` of the `ServiceList` is set. 
Other examples of list metadata is pagination headers and information, in case the returned list would be too large to return in only one request. + +This does seem specific to just REST communication, and yes, pretty much it is. However, for controllers it presents a nice feature. + +The Go struct for typed list (like `ServiceList`), looks something like this: + +```go +// From https://github.com/kubernetes/api/blob/v0.21.1/core/v1/types.go#L4423 +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Service `json:"items"` +} +``` + +If I, as a controller developer, would like to ask for a list of services, what do I do when using e.g. the `controller-runtime` [`Client`](https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.9.2/pkg/client#Reader)? + +The answer is, allocate an empty `ServiceList`, pass a pointer of that like follows to get the data: + +```go +var svclist []v1.ServiceList +err := client.List(ctx, &svclist) +// svclist.Items is now populated with all returned Services at page 1 +... +// If the list of services was larger than the allowed response size, a +// fraction of the services will be returned on the same call. But due to +// that during the first List call, the list's metadata was populated with +// information about what page to ask for next, one can just call List again +// to get the next page. +err := client.List(ctx, &svclist) +// consume more Services at page 2 +``` + +What is useful here, is that `svclist.Items` is of type `[]v1.Service` by definition. There is no need to cast generic objects to Services before using them. Additionally, if the list would contain something else than a `Service`, the decoder would be unable to decode and fail with an error. + +These are the existing advantages of using a `List`; these are documented here for additional context. + +Because both JSON and YAML support multiple frames, there is technically no direct need to use a `List` in e.g. 
files checked into Git, if the application reading the byte stream supports framing, that is. If the reading application does not support YAML/JSON framing, using a `List` that can be directly decoded is convenient. + +This gives us the conclusion that the following YAML file shall be treated as valid. + +```yaml +--- +apiVersion: cluster.x-k8s.io/v1alpha4 +kind: MachineList +items: +- apiVersion: cluster.x-k8s.io/v1alpha4 + kind: Machine + spec: + clusterName: "my-cluster" +- apiVersion: cluster.x-k8s.io/v1alpha4 + kind: Machine + spec: + clusterName: "other-cluster" +--- +--- +apiVersion: cluster.x-k8s.io/v1alpha4 +kind: Machine +spec: + clusterName: "other-cluster" +--- +``` + +How many valid frames are there in the above YAML stream? 2. There's one empty frame that is skipped, one `List` and one "normal" object. + +From a framing point of view, we don't know anything about what a `List` is, but it satisfies the contract defined above of being decodable into a single Go struct. + +### Limiting Frame Size and Count + +If you read a byte stream whose size you're unaware of, e.g. when reading from `/dev/stdin` or an HTTP request, you don't want to open yourself up to a situation where you read garbage forever, sent by a malicious actor. This represents a Denial of Service (DoS) attack vector for your application. + +To mitigate that, the builtin `frame.Reader` (and `frame.Writer`, but that's not as important, as the bytes are already in memory) has options to limit the size (byte count) of each frame, and the total frame count, to avoid this situation generally. + +The default frame size is 3 Megabytes, which matches the default Kubernetes API server maximum body size. + +### Recognizing Readers/Writers + +TODO + +TODO: We should maybe allow YAML as in "JSON with comments". How to auto-recognize? 
+ +```yaml +# This is valid YAML, but invalid JSON, due to these comments +# This works, because YAML is a superset of JSON, and hence one +# can use any valid JSON file, with YAML "extensions" like comments. +{ + # Comment + "foo": "bar" # Comment +} +``` + +### Single Readers/Writers + +TODO (Any content type) diff --git a/sanitation.md b/sanitation.md new file mode 100644 index 00000000..1c53434d --- /dev/null +++ b/sanitation.md @@ -0,0 +1,137 @@ +# Frame Sanitation + +The frame sanitation package that lives in `github.com/weaveworks/libgitops/pkg/frame/sanitation` takes care of formatting frames in a user-configurable and content-type-specific way. + +This is useful, for example, when one would like to standardize the formatting of YAML and/or JSON in a Git repository. + +## Goals + +- Provide a way to, in a content-type specific way, set a "default" formatting (Similar purpose as `gofmt` and `rustfmt`) +- Minimize textual diffs when updating an object (e.g. writing back to git) +- Allow the user to specifically choose formatting options like spacing, field ordering +- Allow retaining auxiliary metadata in the frame, e.g. YAML comments + +## Default implementations + +- `sanitation.NewJSONYAML()` supports JSON and YAML with the following options: + - TODO + +## Examples + +### Minimizing YAML diffs + +Take this valid, but messy YAML file as an example of what a user might store in Git: + +"YAML File A": + +```yaml +--- +# root + +apiVersion: sample.com/v1 # bla +# hello +items: +# moveup + - item1 # hello + # bla + - item2 # hi + +kind: MyList # foo + +``` + +Say that you want to append a `item-3` string to the `items` list. You do a `yaml.Unmarshal` and `yaml.Marshal` using your favorite library, and this is what you'll get: + +"YAML File B": + +```yaml +apiVersion: sample.com/v1 +items: +- item1 +- item2 +- item3 +kind: MyList +``` + +That's nice and all, it's semantically the right content. 
However, it's lost all structure from the original YAML document, and the diff is huge and hard to understand: + +```diff +--- Expected ++++ Actual +@@ -1,13 +1,7 @@ +---- +-# root ++apiVersion: sample.com/v1 ++items: ++- item1 ++- item2 ++- item3 ++kind: MyList +-apiVersion: sample.com/v1 # bla +-# hello +-items: +-# moveup +- - item1 # hello +- # bla +- - item2 # hi +- +-kind: MyList # foo +- +``` + +However, if the user calls `sanitize.Sanitize` and gives "YAML File A" as the "original" document and gives "YAML File B" as the "current" document, the JSON/YAML sanitizer will merge these as follows: + +```yaml +# root +apiVersion: sample.com/v1 # bla +# hello +items: + # moveup + - item1 # hello + # bla + - item2 # hi + - item3 +kind: MyList # foo +``` + +With the diff: + +```diff +--- Expected ++++ Actual +@@ -1,4 +1,2 @@ +---- + # root +- + apiVersion: sample.com/v1 # bla +@@ -6,7 +4,7 @@ + items: +-# moveup ++ # moveup + - item1 # hello +- # bla ++ # bla + - item2 # hi +- ++ - item3 + kind: MyList # foo +``` + +Quite a difference! We can see that the + +- Comments from the original document are preserved + - This is achieved by walking the YAML nodes in the "original" document, and the "current" document. Whenever a comment is found in the "original" document, it is copied over to the "current". + - +- Comments are now aligned with the default indentation at that context + - As per the [YAML 1.2 spec](https://yaml.org/spec/1.2/spec.html#id2767100) "comments are not associated with a particular node". + - In practice, though, [gopkg.in/yaml.v3 +](https://pkg.go.dev/gopkg.in/yaml.v3) **does attach** comments to YAML nodes. Arguably, this is also what users do expect. + - Hence, what is happening when sanitizing this document is that all comments line up on the same indentation as it's context. 
+- The unnecessary `---` separator has been removed + - Frame separators should not be part of the frame + - Framing is handled by the [framer](framing.md) +- The list indentation is preserved + - That is, the list items of `items` are indented like in the original A document, but unlike current B +- Unnecessary newlines are removed + +TODO: Investigate what happens to comments when you prepend an item to a list. From a6be8d1eaec51f018f883951393d6df69d11cbfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 2 Aug 2021 21:07:50 +0300 Subject: [PATCH 146/149] WIP new options pattern --- pkg/frame/options.go | 84 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) diff --git a/pkg/frame/options.go b/pkg/frame/options.go index 897e78fc..eb2b74ee 100644 --- a/pkg/frame/options.go +++ b/pkg/frame/options.go @@ -6,6 +6,90 @@ import ( "github.com/weaveworks/libgitops/pkg/util/limitedio" ) +// TODO: Figure out a new Options pattern, in the form of: + +/* +func SomeOperation(bla string, opts ...Option) { + o := defaultOpts().ApplyOptions(opts) + + // Call "downstream" + SomeCompositeOperation(bla, opts...) 
+} + +func SomeCompositeOperation(bla string, opts ...Option) { + o := defaultExtOpts().ApplyOptionsToExt(opts) +} + +func defaultOpts() *Options { + return &Options{"abc", nil} +} + +type Options struct { + Foo string + Bar *bool +} + +func (o *Options) GetOptions() *Options {return o} +func (o *Options) ApplyTo(t OptionsTarget) { + target := t.GetOptions() + if len(o.Foo) != 0 { + target.Foo = o.Foo + } + if o.Bar != nil { + target.Bar = o.Bar + } +} +func (o *Options) ApplyOptions(opts []Option) *Options { + for _, opt := range opts { + opt.ApplyTo(o) + } + return o +} + +func defaultExtOpts() *ExtOptions { + return &ExtOptions{ + OptionsTarget: defaultOpts, + Baz: 1, + } +} + +type ExtOptions struct { + OptionsTarget + Baz int64 +} + +func (o *ExtOptions) GetExtOptions() *ExtOptions {return o} +func (o *ExtOptions) ApplyTo(t OptionsTarget) { + ext, ok := t.(ExtOptionsTarget) + if !ok { + return + } + target := ext.GetExtOptions() + if o.Baz != 0 { + target.Baz = o.Baz + } +} +func (o *ExtOptions) ApplyOptionsToExt(opts []Option) *ExtOptions { + for _, opt := range opts { + opt.ApplyTo(o) + } + return o +} + +type Option interface { + ApplyTo(OptionsTarget) +} +type OptionsTarget interface { + GetOptions() *Options + // ApplyOptions(opts []Option) *Options +} +type ExtOptionsTarget interface { + OptionsTarget + GetExtOptions() *ExtOptions + // ApplyOptionsToExt(opts []Option) *ExtOptions +} +*/ + // DefaultMaxFrameCount specifies the default maximum of frames that can be read by a Reader. 
const DefaultReadMaxFrameCount = 1024 From 15b389fb9b140f713ebc58041c68150bc2381901 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Tue, 3 Aug 2021 19:54:46 +0300 Subject: [PATCH 147/149] WIP sanitation improvements --- pkg/frame/sanitize/sanitize.go | 29 ++++++++++++++- pkg/frame/sanitize/sanitize_test.go | 56 ++++++++++++++++++++++++++++- sanitation.md | 5 ++- 3 files changed, 87 insertions(+), 3 deletions(-) diff --git a/pkg/frame/sanitize/sanitize.go b/pkg/frame/sanitize/sanitize.go index 6fbe9792..71bebe82 100644 --- a/pkg/frame/sanitize/sanitize.go +++ b/pkg/frame/sanitize/sanitize.go @@ -76,7 +76,8 @@ type jsonYAMLOptions struct { // Only applicable to YAML; either yaml.CompactSequenceStyle or yaml.WideSequenceStyle ForceSeqIndentStyle yaml.SequenceIndentStyle // Only applicable to YAML; JSON doesn't support comments - CopyComments *bool + CopyComments *bool + ClearEmptyFields [][]string /* TODO: ForceMapKeyOrder that can either be - PreserveOrder (if unset) => preserves the order from the prior if given. no-op if no prior. @@ -89,6 +90,10 @@ func defaultJSONYAMLOptions() *jsonYAMLOptions { return (&jsonYAMLOptions{ Indentation: pointer.String(""), CopyComments: pointer.Bool(true), + ClearEmptyFields: [][]string{ + []string{"metadata", "creationTimestamp"}, + []string{"status"}, + }, }) } @@ -102,6 +107,9 @@ func (o *jsonYAMLOptions) applyToJSONYAML(target *jsonYAMLOptions) { if o.CopyComments != nil { target.CopyComments = o.CopyComments } + if o.ClearEmptyFields != nil { + target.ClearEmptyFields = o.ClearEmptyFields + } } func (o *jsonYAMLOptions) applyOptions(opts []JSONYAMLOption) *jsonYAMLOptions { @@ -147,6 +155,7 @@ func (s *defaultSanitizer) handleYAML(ctx context.Context, frame []byte) ([]byte // Parse the current node frameNodes, err := (&kio.ByteReader{ + // TODO: Is this a bug in kyaml? 
Reader: bytes.NewReader(append([]byte{'\n'}, frame...)), DisableUnwrapping: true, OmitReaderAnnotations: true, @@ -172,6 +181,24 @@ func (s *defaultSanitizer) handleYAML(ctx context.Context, frame []byte) ([]byte } } + for _, clearPath := range s.opts.ClearEmptyFields { + if len(clearPath) == 0 { + continue + } + filters := []yaml.Filter{} + if len(clearPath) > 1 { + // lookup the elements before the last element + filters = append(filters, yaml.Lookup(clearPath[:len(clearPath)-1]...)) + } + filters = append(filters, yaml.FieldClearer{ + Name: clearPath[len(clearPath)-1], // clear the last element + IfEmpty: true, + }) + if err := frameNode.PipeE(filters...); err != nil { + return nil, err + } + } + return yaml.MarshalWithOptions(frameNode.Document(), &yaml.EncoderOptions{ SeqIndent: s.resolveSeqStyle(frame, priorData, hasPriorData), }) diff --git a/pkg/frame/sanitize/sanitize_test.go b/pkg/frame/sanitize/sanitize_test.go index 679629ea..0be80f68 100644 --- a/pkg/frame/sanitize/sanitize_test.go +++ b/pkg/frame/sanitize/sanitize_test.go @@ -338,12 +338,14 @@ notexist: foo # remember me! 
apiVersion: v1 fruits: - fruit1 -kind: List + items: - item1 - item2 - item3 +kind: List + `, want: `# root # hello @@ -498,6 +500,28 @@ func TestIfSupported(t *testing.T) { ct: content.ContentTypeJSON, frame: ` { "foo" : true } `, want: `{"foo":true} +`, + }, + { // TODO: Test all possible corner cases with this, and move to the test above + name: "remove empty .metadata.creationTimestamp and .status", + s: NewJSONYAML(), + ct: content.ContentTypeYAML, + frame: `--- +apiVersion: v1 +kind: Pod +metadata: + name: foo + creationTimestamp: null +spec: + containers: null +status: {} +`, + want: `apiVersion: v1 +kind: Pod +metadata: + name: foo +spec: + containers: null `, }, } @@ -508,3 +532,33 @@ func TestIfSupported(t *testing.T) { }) } } + +/* +func ExampleClear() { + obj, err := yaml.Parse(` +kind: Deployment +metadata: null +spec: + template: {} +`) + if err != nil { + log.Fatal(err) + } + node, err := obj.Pipe(yaml.FieldClearer{Name: "metadata", IfEmpty: true}) + if err != nil { + log.Fatal(err) + } + fmt.Println(node.String()) + fmt.Println(obj.String()) + // Output: + // name: app + // annotations: + // a.b.c: d.e.f + // g: h + // + // kind: Deployment + // spec: + // template: {} + // +} +*/ diff --git a/sanitation.md b/sanitation.md index 1c53434d..05ad037f 100644 --- a/sanitation.md +++ b/sanitation.md @@ -125,7 +125,7 @@ Quite a difference! We can see that the - Comments are now aligned with the default indentation at that context - As per the [YAML 1.2 spec](https://yaml.org/spec/1.2/spec.html#id2767100) "comments are not associated with a particular node". - In practice, though, [gopkg.in/yaml.v3 -](https://pkg.go.dev/gopkg.in/yaml.v3) **does attach** comments to YAML nodes. Arguably, this is also what users do expect. +](https://pkg.go.dev/gopkg.in/yaml.v3) (and by extension, kyaml) **does attach** comments to YAML nodes. Arguably, this is also what users do expect. 
- Hence, what is happening when sanitizing this document is that all comments line up on the same indentation as it's context. - The unnecessary `---` separator has been removed - Frame separators should not be part of the frame @@ -135,3 +135,6 @@ Quite a difference! We can see that the - Unnecessary newlines are removed TODO: Investigate what happens to comments when you prepend an item to a list. +TODO: Show that it trims whitespace, e.g. `kind : Foo` becomes `kind: Foo` +TODO: Show that it removes empty fields from the YAML, e.g. `status: {}` or `creationTimestamp: null` +TODO: Share the context on why the above actually show up in YAML in the first place. From 20e744f95b9e68c1893c3c4d6ff2b100e5c7e10a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Wed, 4 Aug 2021 10:28:13 +0300 Subject: [PATCH 148/149] WIP --- go.mod | 10 +- go.sum | 55 +- pkg/storage/backend/backend.go | 23 +- pkg/storage/client/interfaces.go | 7 + pkg/storage/client/transactional/client.go | 136 ++-- .../client/transactional/commit/commit.go | 215 ------- .../transactional/distributed/client.go | 263 ++++---- .../distributed/git/filesystem.go | 583 +++++++++++++++++- .../transactional/distributed/git/git.go | 20 +- .../distributed/git/github/github.go | 94 +-- .../transactional/distributed/git/gogit.go | 41 +- .../distributed/git/gogit_test.go | 20 +- .../distributed/git/interfaces.go | 11 +- .../transactional/distributed/git/options.go | 2 +- .../transactional/distributed/interfaces.go | 16 +- pkg/storage/client/transactional/handlers.go | 19 +- .../client/transactional/interfaces.go | 16 +- pkg/storage/client/transactional/tx.go | 2 +- pkg/storage/client/transactional/tx_common.go | 6 +- pkg/storage/commit/commit.go | 257 ++++++++ pkg/storage/commit/pr/pull_request.go | 28 + pkg/storage/commit/request.go | 59 ++ pkg/storage/event/event.go | 8 +- pkg/storage/event/interfaces.go | 5 +- pkg/storage/filesystem/dir_traversal.go | 9 +- 
pkg/storage/filesystem/fileevents/events.go | 2 + .../filesystem/fileevents/interfaces.go | 2 + pkg/storage/filesystem/filefinder_simple.go | 35 +- pkg/storage/filesystem/filesystem.go | 167 ++--- pkg/storage/filesystem/storage.go | 53 +- .../btree/btree_versioned_index.go | 8 + .../filesystem/unstructured/event/storage.go | 13 +- .../unstructured/filefinder_mapped.go | 4 +- .../filesystem/unstructured/interfaces.go | 7 +- .../filesystem/unstructured/storage.go | 11 +- pkg/storage/interfaces.go | 7 +- 36 files changed, 1503 insertions(+), 711 deletions(-) delete mode 100644 pkg/storage/client/transactional/commit/commit.go create mode 100644 pkg/storage/commit/commit.go create mode 100644 pkg/storage/commit/pr/pull_request.go create mode 100644 pkg/storage/commit/request.go diff --git a/go.mod b/go.mod index 0dd0d7f2..a8a35755 100644 --- a/go.mod +++ b/go.mod @@ -30,12 +30,12 @@ require ( go.opentelemetry.io/otel/trace v1.0.0-RC2 go.uber.org/atomic v1.7.0 go.uber.org/multierr v1.6.0 - go.uber.org/zap v1.17.0 - golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 - k8s.io/apimachinery v0.21.2 + go.uber.org/zap v1.18.1 + golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c + k8s.io/apimachinery v0.21.3 k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d - k8s.io/utils v0.0.0-20210527160623-6fdb442a123b - sigs.k8s.io/controller-runtime v0.9.3 + k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 + sigs.k8s.io/controller-runtime v0.9.5 sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738 sigs.k8s.io/yaml v1.2.0 ) diff --git a/go.sum b/go.sum index 77e8a284..f44c0ba4 100644 --- a/go.sum +++ b/go.sum @@ -61,6 +61,8 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator 
v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -396,15 +398,14 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= -github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= +github.com/onsi/gomega v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI= +github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= 
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -546,8 +547,8 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= +go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -707,8 +708,8 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= @@ -727,8 +728,8 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6 h1:Vv0JUPWTyeqUq42B2WJ1FeIDjjvGKoA2Ss+Ts0lAVbs= -golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -890,18 +891,18 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.21.2 h1:vz7DqmRsXTCSa6pNxXwQ1IYeAZgdIsua+DZU+o+SX3Y= 
-k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= -k8s.io/apiextensions-apiserver v0.21.2 h1:+exKMRep4pDrphEafRvpEi79wTnCFMqKf8LBtlA3yrE= -k8s.io/apiextensions-apiserver v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= -k8s.io/apimachinery v0.21.2 h1:vezUc/BHqWlQDnZ+XkrpXSmnANSLbpnlpwo0Lhk0gpc= -k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= -k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= -k8s.io/client-go v0.21.2 h1:Q1j4L/iMN4pTw6Y4DWppBoUxgKO8LbffEMVEV00MUp0= -k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= -k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= -k8s.io/component-base v0.21.2 h1:EsnmFFoJ86cEywC0DoIkAUiEV6fjgauNugiw1lmIjs4= -k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= +k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ= +k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= +k8s.io/apiextensions-apiserver v0.21.3 h1:+B6biyUWpqt41kz5x6peIsljlsuwvNAp/oFax/j2/aY= +k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= +k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII= +k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= +k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= +k8s.io/client-go v0.21.3 h1:J9nxZTOmvkInRDCzcSNQmPJbDYN/PjlxXT9Mos3HcLg= +k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= +k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= +k8s.io/component-base v0.21.3 h1:4WuuXY3Npa+iFfi2aDRiOz+anhNvRfye0859ZgfC5Og= +k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo 
v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= @@ -913,19 +914,19 @@ k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2R k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d h1:lUK8GPtuJy8ClWZhuvKoaLdKGPLq9H1PxWp7VPBZBkU= k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210527160623-6fdb442a123b h1:MSqsVQ3pZvPGTqCjptfimO2WjG7A9un2zcpiHkA6M/s= -k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 h1:DnzUXII7sVg1FJ/4JX6YDRJfLNAC7idRatPwe07suiI= +k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= -sigs.k8s.io/controller-runtime v0.9.3 h1:n075bHQ1wb8hpX7C27pNrqsb0fj8mcfCQfNX+oKTbYE= -sigs.k8s.io/controller-runtime v0.9.3/go.mod h1:TxzMCHyEUpaeuOiZx/bIdc2T81vfs/aKdvJt9wuu0zk= +sigs.k8s.io/controller-runtime v0.9.5 h1:WThcFE6cqctTn2jCZprLICO6BaKZfhsT37uAapTNfxc= +sigs.k8s.io/controller-runtime v0.9.5/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA= sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738 h1:Nkg3viu9IE/TSzvYt4GGy5FkhdPk3bptXuxW5TnU9uo= sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod 
h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8= -sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/pkg/storage/backend/backend.go b/pkg/storage/backend/backend.go index 8b157aa9..eee027f6 100644 --- a/pkg/storage/backend/backend.go +++ b/pkg/storage/backend/backend.go @@ -11,6 +11,7 @@ import ( "github.com/weaveworks/libgitops/pkg/serializer" "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" + "go.uber.org/multierr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" @@ -260,8 +261,12 @@ func (b *Generic) Create(ctx context.Context, obj Object) error { return err } - // Do not create it if it already exists - if b.storage.Exists(ctx, id) { + // Do not create the object if it already exists. + exists, err := b.storage.Exists(ctx, id) + if err != nil { + return err + } + if exists { return core.NewErrAlreadyExists(id) } @@ -293,9 +298,10 @@ func (b *Generic) Update(ctx context.Context, obj Object) error { // If the cont return err } - // Require that the object already exists - if !b.storage.Exists(ctx, id) { - return core.NewErrNotFound(id) + // Require that the object already exists. 
If err != nil, + // exists == false, hence it's enough to check for !exists + if exists, err := b.storage.Exists(ctx, id); !exists { + return multierr.Combine(core.NewErrNotFound(id), err) } // Validate that the change is ok @@ -360,9 +366,10 @@ func (b *Generic) Delete(ctx context.Context, obj Object) error { return err } - // Verify it did exist - if !b.storage.Exists(ctx, id) { - return core.NewErrNotFound(id) + // Verify it did exist. If err != nil, + // exists == false, hence it's enough to check for !exists + if exists, err := b.storage.Exists(ctx, id); !exists { + return multierr.Combine(core.NewErrNotFound(id), err) } // Validate that the change is ok diff --git a/pkg/storage/client/interfaces.go b/pkg/storage/client/interfaces.go index dc80e78f..63f5cb79 100644 --- a/pkg/storage/client/interfaces.go +++ b/pkg/storage/client/interfaces.go @@ -30,6 +30,13 @@ type Reader interface { BackendReader() backend.Reader } +type EventReader interface { + Reader + // If ctx points to a tag; then only tag updates are followed + // If ctx points to a branch; then updates to that branch are included + client.WithWatch +} + type Writer interface { client.Writer BackendWriter() backend.Writer diff --git a/pkg/storage/client/transactional/client.go b/pkg/storage/client/transactional/client.go index f5a93e7b..ddc78f5a 100644 --- a/pkg/storage/client/transactional/client.go +++ b/pkg/storage/client/transactional/client.go @@ -7,12 +7,13 @@ import ( "strings" "sync" - "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "github.com/weaveworks/libgitops/pkg/storage/backend" "github.com/weaveworks/libgitops/pkg/storage/client" - "github.com/weaveworks/libgitops/pkg/storage/client/transactional/commit" + "github.com/weaveworks/libgitops/pkg/storage/commit" "github.com/weaveworks/libgitops/pkg/storage/core" "go.uber.org/atomic" + "k8s.io/apimachinery/pkg/types" utilerrs "k8s.io/apimachinery/pkg/util/errors" ) @@ -31,10 +32,10 @@ func NewGeneric(c client.Client, manager 
TransactionManager) (Client, error) { txHooks: &MultiTransactionHook{}, commitHooks: &MultiCommitHook{}, manager: manager, - txs: make(map[string]*atomic.Bool), + txs: make(map[types.UID]*atomic.Bool), txsMu: &sync.Mutex{}, } - return &genericWithRef{g, commit.Default()}, nil + return &genericWithRef{g, nil, commit.Default()}, nil } type generic struct { @@ -49,59 +50,65 @@ type generic struct { // +required manager TransactionManager - txs map[string]*atomic.Bool + txs map[types.UID]*atomic.Bool txsMu *sync.Mutex } type genericWithRef struct { *generic - ref commit.Ref + hash commit.Hash + ref commit.Ref } -func (c *genericWithRef) AtRef(ref commit.Ref) Client { - return &genericWithRef{c.generic, ref} +func (c *genericWithRef) AtHash(h commit.Hash) Client { + return &genericWithRef{generic: c.generic, hash: h, ref: c.ref} } -func (c *genericWithRef) AtSymbolicRef(symbolic string) Client { - return c.AtRef(commit.At(symbolic)) +func (c *genericWithRef) AtRef(symbolic commit.Ref) Client { + // TODO: Invalid (programmer error) to pass symbolic == nil + return &genericWithRef{generic: c.generic, hash: c.hash, ref: symbolic} } func (c *genericWithRef) CurrentRef() commit.Ref { return c.ref } - -/* -type txLockKeyImpl struct{} - -var txLockKey = txLockKeyImpl{} - -type txLock struct { - // mode specifies what transaction mode is used; Atomic or AllowReading. 
- //mode TxMode - // active == 1 means "transaction active, mu is locked for writing" - // active == 0 means "transaction has stopped, mu has been unlocked" - //active uint32 - active *atomic.Bool -}*/ +func (c *genericWithRef) CurrentHash() (commit.Hash, error) { + // Use the fixed hash if set + if c.hash != nil { + return c.hash, nil + } + // Otherwise, lookup the symbolic + return c.ref.Resolve(c.manager.RefResolver()) +} func (c *genericWithRef) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error { - return c.lockAndRead(ctx, func(ctx context.Context) error { + return c.defaultCtxCommitRef(ctx, func(ctx context.Context) error { return c.c.Get(ctx, key, obj) }) } func (c *genericWithRef) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - return c.lockAndRead(ctx, func(ctx context.Context) error { + return c.defaultCtxCommitRef(ctx, func(ctx context.Context) error { return c.c.List(ctx, list, opts...) }) } -/*func (c *genericWithRef) lockForBranch(branch string) (syncutil.LockWithData, *txLock, bool) { - lck := c.lockMap.LockByName(branch) - txState, ok := lck.QLoad(txLockKey).(*txLock) - return lck, txState, ok -}*/ +// defaultCtxCommitRef makes sure that there's either commit.Hash registered with the context when reading +// TODO: In the future, shall filesystems also support commit.Ref? 
+func (c *genericWithRef) defaultCtxCommitRef(ctx context.Context, callback func(ctx context.Context) error) error { + // If ctx already specifies an immutable version to read, use it + if _, ok := commit.GetHash(ctx); ok { + return callback(ctx) + } + // If ctx specifies a symbolic target, resolve it + if ref, ok := commit.GetRef(ctx); ok { + h, err := ref.Resolve(c.manager.RefResolver()) + if err != nil { + return err + } + return callback(commit.WithHash(ctx, h)) + } -func (c *genericWithRef) lockAndRead(ctx context.Context, callback func(ctx context.Context) error) error { - h, err := c.ref.Resolve(c.manager.RefResolver()) + // Otherwise, look it up based on this client's data + h, err := c.CurrentHash() if err != nil { return err } @@ -111,48 +118,31 @@ func (c *genericWithRef) lockAndRead(ctx context.Context, callback func(ctx cont return callback(commit.WithHash(ctx, h)) } -func (c *genericWithRef) txStateByName(name string) *atomic.Bool { +func (c *genericWithRef) txStateByUID(uid types.UID) *atomic.Bool { // c.txsMu guards reads and writes of the c.txs map c.txsMu.Lock() defer c.txsMu.Unlock() // Check if information about a transaction on this branch exists. - state, ok := c.txs[name] + state, ok := c.txs[uid] if ok { return state } // if not, grow the txs map by one and return it - c.txs[name] = atomic.NewBool(false) - return c.txs[name] + c.txs[uid] = atomic.NewBool(false) + return c.txs[uid] } func (c *genericWithRef) initTx(ctx context.Context, info TxInfo) (context.Context, txFunc, error) { - // Get the head branch lock and status - //lck := c.lockMap.LockByName(info.HeadBranch) - - // Wait for all reads to complete (in the case of the atomic more), - // and then lock for writing. For non-atomic mode this uses the mutex - // as it is modifying txState, and two transactions must not run at - // the same time for the same branch. - // - // Always lock mu when a transaction is running on this branch, - // regardless of mode. 
If atomic mode is enabled, this also waits - // on any reads happening at this moment. For all modes, this ensures - // transactions happen in order. - /*lck.Lock() - txState := &txLock{ - active: 1, // set tx state to "active" - //mode: info.Options.Mode, // declare what transaction mode is used - } - lck.Store(txLockKey, txState)*/ + log := logr.FromContextOrDiscard(ctx) - active := c.txStateByName(info.HeadBranch) + active := c.txStateByUID(info.Target.UUID()) // If active == false, then this will switch active => true and return true // If active == true, then no operation will take place, and false is returned - // In other words, if false is returned, a transaction is ongoing and we should - // return a temporal error + // In other words, if false is returned, a transaction with this UID is ongoing. + // However, a UID conflict is very unlikely, given randomness and length of the UID if !active.CAS(false, true) { - // TODO: Is this the right way? - return nil, nil, errors.New("transaction is already ongoing") + // TODO: Avoid this possibility + return nil, nil, errors.New("should never happen; UID conflict") } // Create a child context with a timeout @@ -162,10 +152,12 @@ func (c *genericWithRef) initTx(ctx context.Context, info TxInfo) (context.Conte cleanupFunc := func() error { // Cleanup after the transaction if err := c.cleanupAfterTx(ctx, &info); err != nil { - return fmt.Errorf("Failed to cleanup branch %s after tx: %v", info.HeadBranch, err) + return fmt.Errorf("Failed to cleanup branch %s after tx: %v", info.Target.DestBranch(), err) } - // Unlock the mutex so new transactions can take place on this branch - //lck.Unlock() + // Avoid leaking memory by growing c.txs infinitely + c.txsMu.Lock() + delete(c.txs, info.Target.UUID()) + c.txsMu.Unlock() return nil } @@ -177,7 +169,7 @@ func (c *genericWithRef) initTx(ctx context.Context, info TxInfo) (context.Conte // once, regardless of transaction end cause. 
if active.CAS(true, false) { if err := cleanupFunc(); err != nil { - logrus.Errorf("Failed to cleanup after tx timeout: %v", err) + log.Error(err, "failed to cleanup after tx timeout") } } }() @@ -240,6 +232,8 @@ func (c *genericWithRef) Transaction(ctx context.Context, headBranch string, opt var ErrVersionRefIsImmutable = errors.New("cannot execute transaction against immutable version ref") func (c *genericWithRef) transaction(ctx context.Context, headBranch string, opts ...TxOption) (Tx, error) { + log := logr.FromContextOrDiscard(ctx) + // Get the immutable base version hash baseHash, err := c.ref.Resolve(c.manager.RefResolver()) if err != nil { @@ -255,28 +249,28 @@ func (c *genericWithRef) transaction(ctx context.Context, headBranch string, opt headBranch += suffix } - logrus.Debugf("Base commit hash: %q. Head branch: %q.", baseHash, headBranch) + log.V(2).Info("Base commit hash: %q. Head branch: %q.", baseHash, headBranch) // Parse options o := defaultTxOptions().ApplyOptions(opts) + target := commit.NewMutableTarget(headBranch, baseHash) info := TxInfo{ - BaseCommit: baseHash, - HeadBranch: headBranch, - Options: *o, + Target: target, + Options: *o, } // Register the head branch with the context // TODO: We should register all of TxInfo here instead, or ...? - ctxWithHeadBranch := commit.WithMutable(ctx, commit.NewMutable(headBranch)) + ctxWithDestBranch := commit.WithMutableTarget(ctx, target) // Initialize the transaction - ctxWithDeadline, cleanupFunc, err := c.initTx(ctxWithHeadBranch, info) + ctxWithDeadline, cleanupFunc, err := c.initTx(ctxWithDestBranch, info) if err != nil { return nil, err } // Run pre-tx checks and create the new branch - // TODO: Use multierr? + // TODO: Use uber's multierr? 
if err := utilerrs.NewAggregate([]error{ c.TransactionHookChain().PreTransactionHook(ctxWithDeadline, info), c.manager.Init(ctxWithDeadline, &info), diff --git a/pkg/storage/client/transactional/commit/commit.go b/pkg/storage/client/transactional/commit/commit.go deleted file mode 100644 index 0cc32a73..00000000 --- a/pkg/storage/client/transactional/commit/commit.go +++ /dev/null @@ -1,215 +0,0 @@ -package commit - -import ( - "context" - "encoding/hex" - "errors" - "fmt" - "time" - - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/uuid" -) - -type Commit interface { - Hash() Hash - Author() Signature - Message() Message - Parents() []Hash -} - -type Request interface { - Author() Signature - Message() Message - Validate() error -} - -type Signature interface { - // Name describes the author's name (e.g. as per git config) - // +required - Name() string - // Email describes the author's email (e.g. as per git config). - // It is optional generally, but might be required by some specific - // implementations. - // +optional - Email() string - // When is the timestamp of the signature. - // +optional - When() *time.Time - // The String() method must return a (ideally both human- and machine- - // readable) concatenated string including the name and email (if - // applicable) of the author. - fmt.Stringer -} - -type Message interface { - // Title describes the change concisely, so it can be used e.g. as - // a commit message or PR title. Certain implementations might enforce - // character limits on this string. - // +required - Title() string - // Description contains optional extra, more detailed information - // about the change. - // +optional - Description() string - // The String() method must return a (ideally both human- and machine- - // readable) concatenated string including the title and description - // (if applicable) of the author. 
- fmt.Stringer -} - -type Hash interface { - Hash() []byte - String() string -} - -func WithHash(ctx context.Context, h Hash) context.Context { - if h == nil { - return ctx - } - return context.WithValue(ctx, hashCtxKey, h) -} - -func GetHash(ctx context.Context) Hash { - return ctx.Value(hashCtxKey).(Hash) -} - -type hashCtxKeyStruct struct{} - -var hashCtxKey = hashCtxKeyStruct{} - -type Ref interface { - Resolve(RefResolver) (Hash, error) -} - -type RefResolver interface { - ResolveSymbolic(SymbolicRef) (Hash, error) -} - -type Resolver interface { - ResolveHash(Hash) (Commit, error) -} - -func SHA1(h [20]byte) Hash { - b := make([]byte, 20) - copy(b, h[:]) - return &hash{hash: b, encoded: hex.EncodeToString(b)} -} - -func FromSHA1(hash string) Ref { - return &sha1Ref{ref: hash} -} - -func At(symbolic string) SymbolicRef { - return &symbolicRef{SymbolicTypeUnknown, symbolic, 0} -} - -func Default() SymbolicRef { - return AtBranch("") // Signifies the default branch -} - -func AtBranch(b string) SymbolicRef { - return Before(b, 0) -} - -func Before(b string, n uint8) SymbolicRef { - return &symbolicRef{SymbolicTypeBranch, b, n} -} - -func AtTag(t string) SymbolicRef { - return &symbolicRef{SymbolicTypeTag, t, 0} -} - -func AtHash(h string) SymbolicRef { - return &symbolicRef{SymbolicTypeHash, h, 0} -} - -type SymbolicType int - -const ( - SymbolicTypeUnknown SymbolicType = iota - SymbolicTypeHash - // A branch is generally a mutable - SymbolicTypeBranch - SymbolicTypeTag -) - -type SymbolicRef interface { - Ref - - String() string - Index() uint8 - Type() SymbolicType -} - -type hash struct { - hash []byte - encoded string -} - -func (h *hash) Hash() []byte { return h.hash } -func (h *hash) String() string { return h.encoded } - -type sha1Ref struct { - ref string -} - -func (r *sha1Ref) Resolve(RefResolver) (Hash, error) { - b, err := hex.DecodeString(r.ref) - if err != nil { - return nil, err - } - return &hash{hash: b, encoded: r.ref}, nil -} - -type 
symbolicRef struct { - st SymbolicType - ref string - index uint8 -} - -func (r *symbolicRef) String() string { return r.ref } -func (r *symbolicRef) Index() uint8 { return r.index } -func (r *symbolicRef) Type() SymbolicType { return r.st } -func (r *symbolicRef) Resolve(res RefResolver) (Hash, error) { - // This is probably resolver-specific - if r.index != 0 && r.st != SymbolicTypeUnknown && r.st != SymbolicTypeBranch { - return nil, errors.New("index only works for branches") - } - return res.ResolveSymbolic(r) -} - -type MutableTarget interface { - HeadBranch() string - BaseCommit() Hash - UUID() types.UID -} - -func NewMutableTarget(headBranch string, baseCommit Hash) MutableTarget { - return &mutableTarget{headBranch: headBranch, baseCommit: baseCommit, uuid: uuid.New()} -} - -type mutableTarget struct { - headBranch string - baseCommit Hash - uuid types.UID -} - -func (m *mutableTarget) HeadBranch() string { return m.headBranch } -func (m *mutableTarget) BaseCommit() Hash { return m.baseCommit } -func (m *mutableTarget) UUID() types.UID { return m.uuid } - -func WithMutableTarget(ctx context.Context, m MutableTarget) context.Context { - if m == nil { - return ctx - } - return context.WithValue(ctx, mutableCtxKey, m) -} - -func GetMutableTarget(ctx context.Context) MutableTarget { - return ctx.Value(mutableCtxKey).(MutableTarget) -} - -type mutableCtxKeyStruct struct{} - -var mutableCtxKey = mutableCtxKeyStruct{} diff --git a/pkg/storage/client/transactional/distributed/client.go b/pkg/storage/client/transactional/distributed/client.go index 07817dc8..230e4958 100644 --- a/pkg/storage/client/transactional/distributed/client.go +++ b/pkg/storage/client/transactional/distributed/client.go @@ -6,16 +6,17 @@ import ( "sync" "time" - "github.com/sirupsen/logrus" + "github.com/go-logr/logr" "github.com/weaveworks/libgitops/pkg/storage/client" "github.com/weaveworks/libgitops/pkg/storage/client/transactional" + "github.com/weaveworks/libgitops/pkg/storage/commit" 
"github.com/weaveworks/libgitops/pkg/storage/core" "k8s.io/apimachinery/pkg/util/wait" ) // NewClient creates a new distributed Client using the given underlying transactional Client, // remote, and options that configure how the Client should respond to network partitions. -func NewClient(c transactional.Client, remote Remote, opts ...ClientOption) (*Generic, error) { +func NewClient(c transactional.Client, remote Remote, opts ...ClientOption) (Client, error) { if c == nil { return nil, fmt.Errorf("%w: c is mandatory", core.ErrInvalidParameter) } @@ -25,23 +26,26 @@ func NewClient(c transactional.Client, remote Remote, opts ...ClientOption) (*Ge o := defaultOptions().ApplyOptions(opts) - g := &Generic{ - Client: c, + g := &generic{ + GenericClient: c, remote: remote, opts: *o, branchLocks: make(map[string]*branchLock), branchLocksMu: &sync.Mutex{}, } + // Construct the default client + dc := &genericWithRef{g, nil, commit.Default()} + // Register ourselves to hook into the transactional.Client's operations - c.CommitHookChain().Register(g) - c.TransactionHookChain().Register(g) + c.CommitHookChain().Register(dc) + c.TransactionHookChain().Register(dc) - return g, nil + return dc, nil } -type Generic struct { - transactional.Client +type generic struct { + transactional.GenericClient remote Remote opts ClientOptions // branchLocks maps a given branch to a given lock the state of the branch @@ -50,6 +54,31 @@ type Generic struct { branchLocksMu *sync.Mutex } +type genericWithRef struct { + *generic + hash commit.Hash + ref commit.Ref +} + +func (c *genericWithRef) AtHash(h commit.Hash) Client { + return &genericWithRef{generic: c.generic, hash: h, ref: c.ref} +} +func (c *genericWithRef) AtRef(symbolic commit.Ref) Client { + // TODO: Invalid (programmer error) to pass symbolic == nil + return &genericWithRef{generic: c.generic, hash: c.hash, ref: symbolic} +} +func (c *genericWithRef) CurrentRef() commit.Ref { + return c.ref +} +func (c *genericWithRef) 
CurrentHash() (commit.Hash, error) { + // Use the fixed hash if set + if c.hash != nil { + return c.hash, nil + } + // Otherwise, lookup the symbolic + return c.ref.Resolve(c.TransactionManager().RefResolver()) +} + type branchLockKeyImpl struct{} var branchLockKey = branchLockKeyImpl{} @@ -57,7 +86,7 @@ var branchLockKey = branchLockKeyImpl{} type branchLock struct { // mu should be write-locked whenever the branch is actively running any // function from the remote - // mu *sync.RWMutex + mu *sync.RWMutex // lastPull is guarded by mu, before reading, one should RLock mu lastPull time.Time } @@ -68,66 +97,85 @@ type branchLock struct { while a tx is going on for a branch, they just need to specify the direct commit. */ -func (c *Generic) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error { - return c.readWhenPossible(ctx, func() error { - return c.Client.Get(ctx, key, obj) +func (c *genericWithRef) Get(ctx context.Context, key core.ObjectKey, obj client.Object) error { + return c.readWhenPossible(ctx, func(ctx context.Context) error { + return c.GenericClient.Get(ctx, key, obj) }) } -func (c *Generic) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { - return c.readWhenPossible(ctx, func() error { - return c.Client.List(ctx, list, opts...) +func (c *genericWithRef) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + return c.readWhenPossible(ctx, func(ctx context.Context) error { + return c.GenericClient.List(ctx, list, opts...) }) } -func (c *Generic) readWhenPossible(ctx context.Context, operation func() error) error { - ref := core.GetVersionRef(ctx) - // If the versionref is immutable, we can read directly. 
- if ref.IsImmutable() { - return operation() +func (c *genericWithRef) readWhenPossible(ctx context.Context, operation func(context.Context) error) error { + // If the read is immutable, just proceed + if _, ok := commit.GetHash(ctx); ok { + return operation(ctx) + } + if c.hash != nil { + return operation(commit.WithHash(ctx, c.hash)) } - // Check if we need to do a pull before - if c.needsResync(ref, c.opts.CacheValidDuration) { - // Try to pull the remote branch. If it fails, use returnErr to figure out if + // Use the ref from the context, if set, otherwise default to the one configured + // in this Client. + ref, ok := commit.GetRef(ctx) + if !ok { + ref = c.ref + } + + // If the read is reference-based; look it up if it needs resync first + if c.needsResync(ref) { + // Try to pull the remote ref. If it fails, use returnErr to figure out if // this (depending on the configured PACELC mode) is a critical error, or if we // should continue with the read - if err := c.pull(ctx, branch); err != nil { + if err := c.pull(ctx, ref); err != nil { if criticalErr := c.returnErr(err); criticalErr != nil { return criticalErr } } } // Do the read operation - return operation() + return operation(commit.WithRef(ctx, ref)) } -func (c *Generic) getBranchLockInfo(ref core.VersionRef) *branchLock { - // We "know" this is a "branch", as immutable references are no-ops in readWhenPossible - branch := ref.VersionRef() +// makes a string representation of the ref that is used to uniquely determine +// if two refs are "similar" (i.e. 
are touching the same resource to be pulled) +func refToStr(ref commit.Ref) string { + return fmt.Sprintf("%s-%s", ref.Type(), ref.Target()) +} +func (c *genericWithRef) getBranchLockInfo(ref commit.Ref) *branchLock { c.branchLocksMu.Lock() defer c.branchLocksMu.Unlock() - // Check if there exists a lock for that branch - info, ok := c.branchLocks[branch] + // Check if there exists a lock for that ref + str := refToStr(ref) + info, ok := c.branchLocks[str] if ok { return info } // Write to the branchLocks map - c.branchLocks[branch] = &branchLock{ + c.branchLocks[str] = &branchLock{ mu: &sync.RWMutex{}, } - return c.branchLocks[branch] + return c.branchLocks[str] } -func (c *Generic) needsResync(ref core.VersionRef, d time.Duration) bool { +func (c *genericWithRef) needsResync(ref commit.Ref) bool { + // Always resync if the cache is always directly invalidated + cacheValid := c.opts.CacheValidDuration + if cacheValid == 0 { + return true + } + lck := c.getBranchLockInfo(ref) // Lock while reading the last resync time lck.mu.RLock() defer lck.mu.RUnlock() // Resync if there has been no sync so far, or if the last resync was too long ago - return lck.lastPull.IsZero() || time.Since(lck.lastPull) > d + return lck.lastPull.IsZero() || time.Since(lck.lastPull) > cacheValid } // StartResyncLoop starts a resync loop for the given branches for @@ -138,7 +186,7 @@ func (c *Generic) needsResync(ref core.VersionRef, d time.Duration) bool { // be positive, and non-zero. // // resyncBranches specifies what branches to resync. The default is -// []string{""}, i.e. only the "default" branch. +// []commit.Ref{commit.Default()}, i.e. only the "default" branch. // // ctx should be used to cancel the loop, if needed. // @@ -147,39 +195,45 @@ func (c *Generic) needsResync(ref core.VersionRef, d time.Duration) bool { // you need. The branches will be pulled synchronously in order. 
The // resync interval is non-sliding, which means that the interval // includes the time of the operations. -func (c *Generic) StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches ...string) { +func (c *genericWithRef) StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, sync ...commit.Ref) { + log := c.logger(ctx) // Only start this loop if resyncCacheInterval > 0 if resyncCacheInterval <= 0 { - logrus.Warn("No need to start the resync loop; resyncCacheInterval <= 0") + log.Info("No need to start the resync loop; resyncCacheInterval <= 0") return } // If unset, only sync the default branch. - if resyncBranches == nil { - resyncBranches = []string{""} + if sync == nil { + sync = []commit.Ref{commit.Default()} } // Start the resync goroutine - go c.resyncLoop(ctx, resyncCacheInterval, resyncBranches) + go c.resyncLoop(ctx, resyncCacheInterval, sync) } -func (c *Generic) resyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches []string) { - logrus.Debug("Starting the resync loop...") +func (c *genericWithRef) logger(ctx context.Context) logr.Logger { + return logr.FromContextOrDiscard(ctx).WithName("distributed.Client") +} + +func (c *genericWithRef) resyncLoop(ctx context.Context, resyncCacheInterval time.Duration, sync []commit.Ref) { + log := c.logger(ctx).WithName("resyncLoop") + log.V(2).Info("starting resync loop") wait.NonSlidingUntilWithContext(ctx, func(_ context.Context) { - for _, branch := range resyncBranches { - logrus.Tracef("resyncLoop: Will perform pull operation on branch: %q", branch) + for _, branch := range sync { + log.V(2).Info("resyncLoop: Will perform pull operation on branch: %q", branch) // Perform a fetch, pull & checkout of the new revision if err := c.pull(ctx, branch); err != nil { - logrus.Errorf("resyncLoop: pull failed with error: %v", err) + log.Error(err, "remote pull failed") return } } }, resyncCacheInterval) - logrus.Info("Exiting the resync loop...") 
+ log.V(2).Info("context cancelled, exiting resync loop") } -func (c *Generic) pull(ctx context.Context, ref core.VersionRef) error { +func (c *genericWithRef) pull(ctx context.Context, ref commit.Ref) error { // Need to get the branch-specific lock variable lck := c.getBranchLockInfo(ref) // Write-lock while this operation is in progress @@ -187,74 +241,55 @@ func (c *Generic) pull(ctx context.Context, ref core.VersionRef) error { defer lck.mu.Unlock() // Create a new context that times out after the given duration - pullCtx, cancel := context.WithTimeout(ctx, c.opts.PullTimeout) + ctx, cancel := context.WithTimeout(ctx, c.opts.PullTimeout) defer cancel() - // Make a ctx for the given branch - ctxForBranch := core.WithMutableVersionRef(pullCtx, branch) - if err := c.remote.Pull(ctxForBranch); err != nil { + // Make a ctx with the given ref + ctx = commit.WithRef(ctx, ref) + if err := c.remote.Pull(ctx); err != nil { return err } // Register the timestamp into the lock lck.lastPull = time.Now() - - // All good return nil } -func (c *Generic) PreTransactionHook(ctx context.Context, info transactional.TxInfo) error { +func (c *genericWithRef) PreTransactionHook(ctx context.Context, info transactional.TxInfo) error { // We count on ctx having the VersionRef registered for the head branch - // Lock the branch for writing, if supported by the remote - // If the lock fails, we DO NOT try to pull, but just exit (either with err or a nil error, - // depending on the configured PACELC mode) - // TODO: Can we rely on the timeout being exact enough here? - // TODO: How to do this before the branch even exists...? 
- if err := c.lock(ctx, info.Options.Timeout); err != nil { - return c.returnErr(err) - } - // Always Pull the _base_ branch before a transaction, to be up-to-date // before creating the new head branch - if err := c.pull(ctx, info.Base); err != nil { + ref := commit.AtBranch(info.Target.DestBranch()) + if err := c.pull(ctx, ref); err != nil { + // TODO: Consider a wrapping closure here instead of having to remember to + // wrap the error in returnErr return c.returnErr(err) } - // All good return nil } -func (c *Generic) PreCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error { +func (c *genericWithRef) PreCommitHook(context.Context, transactional.TxInfo, commit.Request) error { return nil // nothing to do here } -func (c *Generic) PostCommitHook(ctx context.Context, _ transactional.Commit, _ transactional.TxInfo) error { +func (c *genericWithRef) PostCommitHook(ctx context.Context, info transactional.TxInfo, _ commit.Request) error { // Push the branch in the ctx - if err := c.push(ctx); err != nil { + ref := commit.AtBranch(info.Target.DestBranch()) + if err := c.push(ctx, ref); err != nil { return c.returnErr(err) } return nil } -func (c *Generic) PostTransactionHook(ctx context.Context, info transactional.TxInfo) error { - // Unlock the head branch, if supported - if err := c.unlock(ctx); err != nil { - return c.returnErr(err) - } - - return nil +func (c *genericWithRef) PostTransactionHook(context.Context, transactional.TxInfo) error { + return nil // nothing to do here; if we had locking capability one would unlock } -func (c *Generic) Remote() Remote { - return c.remote -} +func (c *genericWithRef) Remote() Remote { return c.remote } -func (c *Generic) branchFromCtx(ctx context.Context) string { - return core.GetVersionRef(ctx).Branch() -} - -func (c *Generic) returnErr(err error) error { +func (c *genericWithRef) returnErr(err error) error { // If RemoteErrorStream isn't defined, just pass the error through if 
c.opts.RemoteErrorStream == nil { return err @@ -266,7 +301,43 @@ func (c *Generic) returnErr(err error) error { return nil } -func (c *Generic) lock(ctx context.Context, d time.Duration) error { +func (c *genericWithRef) push(ctx context.Context, ref commit.Ref) error { + // Need to get the branch-specific lock variable + lck := c.getBranchLockInfo(ref) + // Write-lock while this operation is in progress + lck.mu.Lock() + defer lck.mu.Unlock() + + // Create a new context that times out after the given duration + ctx, cancel := context.WithTimeout(ctx, c.opts.PushTimeout) + defer cancel() + + // Push the head branch using the remote + // If the Push fails, don't execute any other later statements + return c.remote.Push(ctx) +} + +/* + +func (c *genericWithRef) branchFromCtx(ctx context.Context) string { + return core.GetVersionRef(ctx).Branch() +} + +// Lock the branch for writing, if supported by the remote + // If the lock fails, we DO NOT try to pull, but just exit (either with err or a nil error, + // depending on the configured PACELC mode) + // TODO: Can we rely on the timeout being exact enough here? + // TODO: How to do this before the branch even exists...? 
+ if err := c.lock(ctx, info.Options.Timeout); err != nil { + return c.returnErr(err) + } + +// Unlock the head branch, if supported + if err := c.unlock(ctx); err != nil { + return c.returnErr(err) + } + +func (c *genericWithRef) lock(ctx context.Context, d time.Duration) error { lr, ok := c.remote.(LockableRemote) if !ok { return nil @@ -285,7 +356,7 @@ func (c *Generic) lock(ctx context.Context, d time.Duration) error { return lr.Lock(lockCtx, d) } -func (c *Generic) unlock(ctx context.Context) error { +func (c *genericWithRef) unlock(ctx context.Context) error { lr, ok := c.remote.(LockableRemote) if !ok { return nil @@ -303,22 +374,4 @@ func (c *Generic) unlock(ctx context.Context) error { return lr.Unlock(unlockCtx) } - -func (c *Generic) push(ctx context.Context) error { - // Need to get the branch-specific lock variable - lck := c.getBranchLockInfo(c.branchFromCtx(ctx)) - // Write-lock while this operation is in progress - lck.mu.Lock() - defer lck.mu.Unlock() - - // Create a new context that times out after the given duration - pushCtx, cancel := context.WithTimeout(ctx, c.opts.PushTimeout) - defer cancel() - - // Push the head branch using the remote - // If the Push fails, don't execute any other later statements - if err := c.remote.Push(pushCtx); err != nil { - return err - } - return nil -} +*/ diff --git a/pkg/storage/client/transactional/distributed/git/filesystem.go b/pkg/storage/client/transactional/distributed/git/filesystem.go index d05f38e3..4c8160f3 100644 --- a/pkg/storage/client/transactional/distributed/git/filesystem.go +++ b/pkg/storage/client/transactional/distributed/git/filesystem.go @@ -2,65 +2,588 @@ package git import ( "context" + "errors" + "fmt" + "io" + "io/fs" + "io/ioutil" "os" "path/filepath" + "sync" + "time" + "github.com/fluxcd/go-git-providers/gitprovider" "github.com/go-git/go-git/v5" - "github.com/spf13/afero" + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + 
"github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-logr/logr" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed" + "github.com/weaveworks/libgitops/pkg/storage/commit" + "github.com/weaveworks/libgitops/pkg/storage/filesystem" + "github.com/weaveworks/libgitops/pkg/util/structerr" + "go.uber.org/multierr" + "k8s.io/apimachinery/pkg/types" ) -type Filesystem struct { - git *goGit +const ( + ErrImmutableFilesystem = stringError("git clone is immutable; start a transaction to mutate") +) + +type stringError string + +func (s stringError) Error() string { return string(s) } + +var ( + _ filesystem.Filesystem = &Git{} + _ transactional.TransactionManager = &Git{} + _ distributed.Remote = &Git{} +) + +func New(ctx context.Context, repoRef gitprovider.RepositoryRef, opts ...Option) (*Git, error) { + log := logr.FromContextOrDiscard(ctx) + + o := defaultOpts().ApplyOptions(opts) + + tmpDir, err := ioutil.TempDir("", "libgitops") + if err != nil { + return nil, err + } + log.V(2).Info("created temp directory to store Git clones in", "dir", tmpDir) + tmpDirTyped := rootDir(tmpDir) + + transportType := gitprovider.TransportTypeHTTPS // default + if o.AuthMethod != nil { + // TODO: parse the URL instead + transportType = o.AuthMethod.TransportType() + } + cloneURL := repoRef.GetCloneURL(transportType) + + cloneOpts := &git.CloneOptions{ + URL: cloneURL, + Auth: o.AuthMethod, + SingleBranch: true, + NoCheckout: true, + //Depth: 1, // ref: https://github.com/go-git/go-git/issues/207 + RecurseSubmodules: 0, + Progress: nil, + Tags: git.NoTags, + } + if o.MainBranch != "" { + cloneOpts.ReferenceName = plumbing.NewBranchReferenceName(o.MainBranch) + } + + log.Info("cloning the repository", "repo-ref", repoRef) + // Do a base clone to the temporary directory + bareDir := filepath.Join(tmpDir, "root.git") + repo, err := git.PlainCloneContext(ctx, bareDir, true, cloneOpts) + 
// Handle errors + if errors.Is(err, context.DeadlineExceeded) { + return nil, fmt.Errorf("git clone operation timed out: %w", err) + } else if errors.Is(err, context.Canceled) { + return nil, fmt.Errorf("git clone was cancelled: %w", err) + } else if err != nil { + return nil, fmt.Errorf("git clone error: %v", err) + } + + // Enable the uploadpack.allowReachableSHA1InWant option + // http://git-scm.com/docs/git-config#Documentation/git-config.txt-uploadpackallowReachableSHA1InWant + c, err := repo.Config() + if err != nil { + return nil, err + } + gitCfgBytes, _ := c.Marshal() + log.V(2).Info("git config before", "git-config", string(gitCfgBytes)) + + c.Raw.Section("uploadpack").SetOption("allowReachableSHA1InWant", "true") + + gitCfgBytes, _ = c.Marshal() + log.V(2).Info("git config after", "git-config", string(gitCfgBytes)) + + if err := repo.SetConfig(c); err != nil { + return nil, err + } + + // HEAD should be by default a symbolic reference to the main branch + // TODO: Does this exist for a bare repository? + r, err := repo.Head() + if err != nil { + return nil, err + } + mainBranch := string(r.Target()) + log.V(2).Info("got main branch", "main-branch", mainBranch) + + return &Git{ + Filesystem: filesystem.FromContext(&fileSystem{ + bareRepo: repo, + rootDir: tmpDirTyped, + defaultBranch: mainBranch, + }), + rootDir: tmpDirTyped, + bareDir: bareDir, + bareRepo: repo, + defaultBranch: mainBranch, + }, nil +} + +type rootDir string + +func (d rootDir) gitDirFor(target commit.MutableTarget) string { + return filepath.Join(string(d), string(target.UUID())) // +".git" TODO is this needed? 
+}
+
+// TODO: Add a FilesystemFor(dir string) Filesystem method
+type Git struct {
+	filesystem.Filesystem
+	rootDir
+	bareDir       string
+	bareRepo      *git.Repository
+	defaultBranch string
+
+	localClones   map[types.UID]*localClone
+	localClonesMu *sync.Mutex
+}
+
+type localClone struct {
+	repo   *git.Repository
+	wt     *git.Worktree
+	origin *git.Remote
+	target commit.MutableTarget
+}
+
+func (g *Git) localCloneByUUID(uuid types.UID) (*localClone, bool) {
+	// g.localClonesMu guards reads and writes of the g.localClones map
+	g.localClonesMu.Lock()
+	defer g.localClonesMu.Unlock()
+
+	// Check if information about a transaction on this branch exists.
+	lc, ok := g.localClones[uuid]
+	if ok {
+		return lc, true
+	}
+	// if not, grow the localClones map by one and return it
+	g.localClones[uuid] = &localClone{}
+	return g.localClones[uuid], false
 }
 
-func (f *Filesystem) RootDirectory() string {
-	return f.rootDir
+var _ structerr.StructError = &OngoingTransactionError{}
+
+// Maybe move this to the transactional package?
+type OngoingTransactionError struct {
+	Target commit.MutableTarget
+}
+
+func (e *OngoingTransactionError) Error() string {
+	msg := "cannot start a transaction with an UUID that already exists"
+	if e.Target == nil {
+		return msg
+	}
+	return fmt.Sprintf("%s: %s (base: %s, target: %s)", msg, e.Target.UUID(), e.Target.BaseCommit(), e.Target.DestBranch())
+}
+
+func (e *OngoingTransactionError) Is(err error) bool {
+	_, ok := err.(*OngoingTransactionError)
+	return ok
 }
 
-func (f *Filesystem) Checksum(_ context.Context, filename string) (string, error) {
-	// Get the latest commit that is touching this file
-	ci, err := f.git.repo.Log(&git.LogOptions{
-		Order:    git.LogOrderCommitterTime,
-		FileName: &filename,
+func (g *Git) Init(ctx context.Context, tx *transactional.TxInfo) error {
+	target := tx.Target // TODO: Check for nil or not? 
+ + lc, exists := g.localCloneByUUID(target.UUID()) + if exists { + return &OngoingTransactionError{Target: target} + } + + // Do a "git init", as per the instructions at + // https://stackoverflow.com/questions/31278902/how-to-shallow-clone-a-specific-commit-with-depth-1 + var err error + lc.repo, err = git.PlainInit(g.gitDirFor(target), false) + if err != nil { + return err + } + // Register the bare local clone as "origin" + lc.origin, err = lc.repo.CreateRemote(&config.RemoteConfig{ + Name: "origin", + URLs: []string{g.bareDir}, }) if err != nil { - return "", err + return err + } + // Fetch only this specific commit from the origin to HEAD, at depth 1 + refSpec := config.RefSpec(fmt.Sprintf("%s:refs/heads/HEAD", target.BaseCommit())) + if err := lc.origin.FetchContext(ctx, &git.FetchOptions{ + RefSpecs: []config.RefSpec{refSpec}, + Depth: 1, + Tags: git.NoTags, + }); err != nil { + return err } - commit, err := ci.Next() + // Now, check out the worktree + lc.wt, err = lc.repo.Worktree() if err != nil { - return "", err + return err + } + // Create a new branch from the fetched commit, with the head branch name + if err := lc.wt.Checkout(&git.CheckoutOptions{ + Hash: *hashToGoGit(target.BaseCommit()), + Branch: plumbing.NewBranchReferenceName(target.DestBranch()), + Create: true, + }); err != nil { + return err } - return commit.Hash.String(), nil + + return nil } -func (f *Filesystem) MkdirAll(_ context.Context, path string, perm os.FileMode) error { - return f.fs.MkdirAll(path, perm) +func (g *Git) Commit(ctx context.Context, tx *transactional.TxInfo, req commit.Request) error { + log := logr.FromContextOrDiscard(ctx) + target := tx.Target // TODO: Check for nil or not? 
+ + lc, exists := g.localCloneByUUID(target.UUID()) + if exists { + return stringError("nonexistent mutable target") // TODO + } + + // TODO: Make sure this registers net-new files, too + if err := lc.wt.AddGlob("."); err != nil { + return err + } + + t := req.Author().When() + if t == nil { + now := time.Now() + t = &now + } + // TODO: This should be idempotent if the TransactionClient runs it over and over again + newCommit, err := lc.wt.Commit(req.Message().String(), &git.CommitOptions{ + Author: &object.Signature{ + Name: req.Author().Name(), + Email: req.Author().Email(), + When: *t, + }, + // TODO: SignKey + }) + if err != nil { + return err + } + log.V(2).Info("created commit with hash", "commit", newCommit.String()) + + refSpec := fmt.Sprintf("refs/heads/%s:refs/heads/%s", target.DestBranch(), target.DestBranch()) + if err := lc.origin.PushContext(ctx, &git.PushOptions{ + RefSpecs: []config.RefSpec{config.RefSpec(refSpec)}, + }); err != nil { + return err // TODO: Error handling for context cancellations etc. + } + log.V(2).Info("pushed refspec", "refspec", refSpec) + + return nil } -func (f *Filesystem) Remove(_ context.Context, name string) error { - return f.fs.Remove(name) +func (g *Git) Abort(ctx context.Context, tx *transactional.TxInfo) error { + log := logr.FromContextOrDiscard(ctx) + target := tx.Target // TODO: Check for nil or not? + + _, exists := g.localCloneByUUID(target.UUID()) + if !exists { + return stringError("nonexistent mutable target") // TODO + } + + // Removing the Git directory completely + dir := g.gitDirFor(target) + log.V(2).Info("removing local git directory clone", "dir", dir) + if err := os.RemoveAll(dir); err != nil { + return err + } + // TODO: Shall this be done regardless of the os.RemoveAll error? 
+ g.localClonesMu.Lock() + delete(g.localClones, target.UUID()) + g.localClonesMu.Unlock() + return nil } -func (f *Filesystem) Stat(_ context.Context, name string) (os.FileInfo, error) { - return f.fs.Stat(name) +func (g *Git) Pull(ctx context.Context) error { + ref, ok := commit.GetRef(ctx) + if !ok { + return stringError("no commit.Ref given to Git.Pull") + } + var refName plumbing.ReferenceName + tagMode := git.NoTags + switch ref.Type() { + case commit.RefTypeTag: + refName = plumbing.NewTagReferenceName(ref.Target()) + tagMode = git.TagFollowing + case commit.RefTypeBranch: + refName = plumbing.NewBranchReferenceName(ref.Target()) + default: + return fmt.Errorf("Git.Pull cannot support commit.Ref.Type = %s", ref.Type()) + } + + return g.bareRepo.FetchContext(ctx, &git.FetchOptions{ + RemoteName: "origin", + RefSpecs: []config.RefSpec{refNameToSpec(refName)}, + Tags: tagMode, + // TODO: Do something with Depth here? + }) } -func (f *Filesystem) ReadDir(_ context.Context, dirname string) ([]os.FileInfo, error) { - return afero.ReadDir(f.fs, dirname) +func refNameToSpec(refName plumbing.ReferenceName) config.RefSpec { + return config.RefSpec(fmt.Sprintf("%s:%s", refName, refName)) } -func (f *Filesystem) Exists(_ context.Context, path string) (bool, error) { - return afero.Exists(f.fs, path) +func (g *Git) Push(ctx context.Context) error { + target, ok := commit.GetMutableTarget(ctx) + if !ok { + return stringError("no commit.MutableTarget given to Git.Push") + } + destRefName := plumbing.NewBranchReferenceName(target.DestBranch()) + return g.bareRepo.PushContext(ctx, &git.PushOptions{ + RemoteName: "origin", + RefSpecs: []config.RefSpec{refNameToSpec(destRefName)}, + }) } -func (f *Filesystem) ReadFile(_ context.Context, filename string) ([]byte, error) { - return afero.ReadFile(f.fs, filename) +var _ filesystem.ContextFS = &fileSystem{} + +type fileSystem struct { + bareRepo *git.Repository + rootDir + defaultBranch string } -func (f *Filesystem) WriteFile(_ 
context.Context, filename string, data []byte, perm os.FileMode) error { - return afero.WriteFile(f.fs, filename, data, perm) +func (f *fileSystem) ResolveRef(sr commit.Ref) (commit.Hash, error) { + var h plumbing.Hash + + switch sr.Type() { + case commit.RefTypeHash: + c, err := f.bareRepo.CommitObject(plumbing.NewHash(sr.Target())) + if err != nil { + return nil, err + } + h = c.Hash + case commit.RefTypeTag: + t, err := f.bareRepo.Tag(sr.Target()) + if err != nil { + return nil, err + } + h = t.Hash() + default: + ref := sr.Target() + if sr.Type() == commit.RefTypeBranch { + // Default the branch if left unset + if ref == "" { + // TODO: Get rid of this + ref = f.defaultBranch + } + if sr.Before() != 0 { + ref = fmt.Sprintf("%s~%d", sr.Target(), sr.Before()) + } + } + r, err := f.bareRepo.ResolveRevision(plumbing.Revision(ref)) + if err != nil { + return nil, err + } + h = *r + } + return hashFromGoGit(h, sr), nil +} + +func (f *fileSystem) GetRef(ctx context.Context) commit.Ref { + ref, ok := commit.GetRef(ctx) + if ok { + return ref + } + return commit.AtBranch(f.defaultBranch) +} + +func (f *fileSystem) RefResolver() commit.RefResolver { return f } + +func (f *fileSystem) mutableFSFor(ctx context.Context, target commit.MutableTarget) filesystem.FS { + return filesystem.NewOSFilesystem(f.gitDirFor(target)).WithContext(ctx) +} + +func hashToGoGit(h commit.Hash) *plumbing.Hash { + var ph plumbing.Hash + copy(ph[:], h.Hash()) + return &ph +} + +func hashFromGoGit(h plumbing.Hash, src commit.Ref) commit.Hash { + return commit.SHA1(h, src) +} + +func (f *fileSystem) hashFor(ctx context.Context) (*plumbing.Hash, error) { + h, ok := commit.GetHash(ctx) + if ok { + return hashToGoGit(h), nil + } + // TODO: Use f.bareRepo.HEAD() here instead? 
+ return f.bareRepo.ResolveRevision(plumbing.Revision(f.defaultBranch)) +} + +func (f *fileSystem) MkdirAll(ctx context.Context, path string, perm os.FileMode) error { + if target, mutable := commit.GetMutableTarget(ctx); mutable { + return f.mutableFSFor(ctx, target).MkdirAll(path, perm) + } + return ErrImmutableFilesystem +} + +func (f *fileSystem) Remove(ctx context.Context, name string) error { + if target, mutable := commit.GetMutableTarget(ctx); mutable { + return f.mutableFSFor(ctx, target).Remove(name) + } + return ErrImmutableFilesystem } -func (f *Filesystem) Walk(_ context.Context, root string, walkFn filepath.WalkFunc) error { - return afero.Walk(f.fs, root, walkFn) +func (f *fileSystem) WriteFile(ctx context.Context, filename string, data []byte, perm os.FileMode) error { + if target, mutable := commit.GetMutableTarget(ctx); mutable { + return f.mutableFSFor(ctx, target).WriteFile(filename, data, perm) + } + return ErrImmutableFilesystem +} + +// READ OPS + +func (f *fileSystem) Open(ctx context.Context, name string) (fs.File, error) { + if target, mutable := commit.GetMutableTarget(ctx); mutable { + return f.mutableFSFor(ctx, target).Open(name) + } + h, err := f.hashFor(ctx) + if err != nil { + return nil, err + } + fi, t, err := f.stat(h, name) + if err != nil { + return nil, err + } + ff, err := t.File(name) + if err != nil { + return nil, err + } + rc, err := ff.Reader() + if err != nil { + return nil, err + } + return &fileWrapper{fi, rc}, nil +} + +type fileWrapper struct { + fi fs.FileInfo + io.ReadCloser +} + +func (f *fileWrapper) Stat() (fs.FileInfo, error) { return f.fi, nil } + +func (f *fileSystem) Stat(ctx context.Context, name string) (fs.FileInfo, error) { + if target, mutable := commit.GetMutableTarget(ctx); mutable { + return f.mutableFSFor(ctx, target).Stat(name) + } + h, err := f.hashFor(ctx) + if err != nil { + return nil, err + } + fi, _, err := f.stat(h, name) + return fi, err +} + +func (f *fileSystem) stat(h *plumbing.Hash, 
name string) (fs.FileInfo, *object.Tree, error) { + c, err := f.bareRepo.CommitObject(*h) + if err != nil { + return nil, nil, err + } + t, err := c.Tree() + if err != nil { + return nil, nil, err + } + te, err := t.FindEntry(name) + if err != nil { + // As part of the Stat contract, return os.ErrNotExist if the file doesn't exist + return nil, nil, multierr.Combine(os.ErrNotExist, err) + } + fi, err := newFileInfo(te, t, c) + return fi, t, err +} + +func newFileInfo(te *object.TreeEntry, t *object.Tree, c *object.Commit) (*fileInfoWrapper, error) { + sz, err := t.Size(te.Name) + if err != nil { + return nil, err + } + return &fileInfoWrapper{te, sz, c.Committer.When}, nil +} + +type fileInfoWrapper struct { + te *object.TreeEntry + sz int64 + commitTime time.Time +} + +func (i *fileInfoWrapper) Name() string { return filepath.Base(i.te.Name) } // TODO: Needed? +func (i *fileInfoWrapper) Size() int64 { return i.sz } +func (i *fileInfoWrapper) ModTime() time.Time { return i.commitTime } +func (i *fileInfoWrapper) IsDir() bool { return i.Mode().IsDir() } +func (i *fileInfoWrapper) Sys() interface{} { return nil } +func (i *fileInfoWrapper) Type() fs.FileMode { return i.Mode() } +func (i *fileInfoWrapper) Info() (fs.FileInfo, error) { return i, nil } +func (i *fileInfoWrapper) Mode() fs.FileMode { + fm, _ := i.te.Mode.ToOSFileMode() + return fm +} + +func (f *fileSystem) ReadDir(ctx context.Context, dirname string) ([]fs.DirEntry, error) { + if target, mutable := commit.GetMutableTarget(ctx); mutable { + return f.mutableFSFor(ctx, target).ReadDir(dirname) + } + h, err := f.hashFor(ctx) + if err != nil { + return nil, err + } + c, err := f.bareRepo.CommitObject(*h) + if err != nil { + return nil, err + } + t, err := c.Tree() + if err != nil { + return nil, err + } + tw := object.NewTreeWalker(t, false, nil) + infos := []fs.DirEntry{} + for { + _, te, err := tw.Next() + if errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, err + } + fi, err := 
newFileInfo(&te, t, c)
+		if err != nil {
+			return nil, err
+		}
+		infos = append(infos, fi)
+	}
+	return infos, nil
+}
+
+func (f *fileSystem) ReadFile(ctx context.Context, filename string) ([]byte, error) {
+	if target, mutable := commit.GetMutableTarget(ctx); mutable {
+		return f.mutableFSFor(ctx, target).ReadFile(filename)
+	}
+	return nil, ErrImmutableFilesystem // TODO
+}
+
+func (f *fileSystem) Checksum(ctx context.Context, filename string) (string, error) {
+	if target, mutable := commit.GetMutableTarget(ctx); mutable {
+		return f.mutableFSFor(ctx, target).Checksum(filename)
+	}
+
+	h, err := f.hashFor(ctx)
+	if err != nil {
+		return "", err
+	}
+	// Do a stat such that os.ErrNotExist is returned if the file doesn't exist
+	if _, _, err := f.stat(h, filename); err != nil {
+		return "", err
+	}
+	return h.String(), nil
 }
diff --git a/pkg/storage/client/transactional/distributed/git/git.go b/pkg/storage/client/transactional/distributed/git/git.go
index c825634a..c5f754eb 100644
--- a/pkg/storage/client/transactional/distributed/git/git.go
+++ b/pkg/storage/client/transactional/distributed/git/git.go
@@ -1,19 +1,6 @@
 package git
 
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"sync"
-
-	"github.com/fluxcd/go-git-providers/gitprovider"
-	log "github.com/sirupsen/logrus"
-	"github.com/weaveworks/libgitops/pkg/storage/client/transactional"
-	"github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed"
-)
-
+/*
 var (
 	// ErrNotStarted happens if you try to operate on the LocalClone before you have started
 	// it with StartCheckoutLoop. 
@@ -105,9 +92,9 @@ func (d *LocalClone) canWrite() bool { // verifyRead makes sure it's ok to start a read-something-from-git process func (d *LocalClone) verifyRead() error { // Safeguard against not starting yet - /*if d.wt == nil { + *if d.wt == nil { return fmt.Errorf("cannot pull: %w", ErrNotStarted) - }*/ + }* return nil } @@ -241,3 +228,4 @@ func (d *LocalClone) Cleanup() error { } return nil } +*/ diff --git a/pkg/storage/client/transactional/distributed/git/github/github.go b/pkg/storage/client/transactional/distributed/git/github/github.go index 23a20128..702c6213 100644 --- a/pkg/storage/client/transactional/distributed/git/github/github.go +++ b/pkg/storage/client/transactional/distributed/git/github/github.go @@ -7,38 +7,26 @@ import ( "github.com/fluxcd/go-git-providers/github" "github.com/fluxcd/go-git-providers/gitprovider" - "github.com/fluxcd/go-git-providers/validation" gogithub "github.com/google/go-github/v32/github" "github.com/sirupsen/logrus" "github.com/weaveworks/libgitops/pkg/storage/client/transactional" + "github.com/weaveworks/libgitops/pkg/storage/commit" + "github.com/weaveworks/libgitops/pkg/storage/commit/pr" + "k8s.io/apimachinery/pkg/util/validation/field" ) -// PullRequest can be returned from a TransactionFunc instead of a CommitResult, if -// a PullRequest is desired to be created by the PullRequestProvider. -type PullRequest interface { - // PullRequestResult is a superset of CommitResult - transactional.Commit +// PullRequest implements pr.Request. +var _ pr.Request = PullRequest{} - // GetLabels specifies what labels should be applied on the PR. - // +optional - GetLabels() []string - // GetAssignees specifies what user login names should be assigned to this PR. - // Note: Only users with "pull" access or more can be assigned. - // +optional - GetAssignees() []string - // GetMilestone specifies what milestone this should be attached to. 
- // +optional - GetMilestone() string -} - -// GenericPullRequest implements PullRequest. -var _ PullRequest = GenericPullRequest{} - -// GenericPullRequest implements PullRequest. -type GenericPullRequest struct { - // GenericPullRequest is a superset of a Commit. - transactional.Commit +// PullRequest implements PullRequest. +type PullRequest struct { + // PullRequest is a superset of any Commit. + commit.Request + // TargetBranch specifies what branch the Pull Request head branch should + // be merged into. + // +required + TargetBranch string // Labels specifies what labels should be applied on the PR. // +optional Labels []string @@ -51,17 +39,29 @@ type GenericPullRequest struct { Milestone string } -func (r GenericPullRequest) GetLabels() []string { return r.Labels } -func (r GenericPullRequest) GetAssignees() []string { return r.Assignees } -func (r GenericPullRequest) GetMilestone() string { return r.Milestone } +func (r PullRequest) PullRequest() pr.Metadata { + return &metadata{&r.Labels, &r.Assignees, &r.TargetBranch, &r.Milestone} +} + +func (r PullRequest) Validate() error { + root := field.NewPath("github.PullRequest") + allErrs := field.ErrorList{} + if err := r.Request.Validate(); err != nil { + allErrs = append(allErrs, field.Invalid(root.Child("Request"), r.Request, err.Error())) + } + return allErrs.ToAggregate() +} -func (r GenericPullRequest) Validate() error { - v := validation.New("GenericPullRequest") - // Just validate the "inner" object - v.Append(r.Commit.Validate(), r.Commit, "Commit") - return v.Error() +type metadata struct { + labels, assignees *[]string + targetBranch, milestone *string } +func (m *metadata) TargetBranch() string { return *m.targetBranch } +func (m *metadata) Labels() []string { return *m.labels } +func (m *metadata) Assignees() []string { return *m.assignees } +func (m *metadata) Milestone() string { return *m.milestone } + // TODO: This package should really only depend on go-git-providers' abstraction interface 
var ErrProviderNotSupported = errors.New("only the Github go-git-providers provider is supported at the moment") @@ -80,17 +80,17 @@ type prCreator struct { repoRef gitprovider.RepositoryRef } -func (c *prCreator) PreCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error { +func (c *prCreator) PreCommitHook(ctx context.Context, info transactional.TxInfo, req commit.Request) error { return nil } -func (c *prCreator) PostCommitHook(ctx context.Context, commit transactional.Commit, info transactional.TxInfo) error { +func (c *prCreator) PostCommitHook(ctx context.Context, info transactional.TxInfo, req commit.Request) error { // First, validate the input - if err := commit.Validate(); err != nil { - return fmt.Errorf("given transactional.Commit wasn't valid") + if err := req.Validate(); err != nil { + return fmt.Errorf("given commit.Request wasn't valid: %v", err) } - prCommit, ok := commit.(PullRequest) + prCommit, ok := req.(pr.Request) if !ok { return nil } @@ -102,15 +102,15 @@ func (c *prCreator) PostCommitHook(ctx context.Context, commit transactional.Com owner := c.repoRef.GetIdentity() repo := c.repoRef.GetRepository() var body *string - if commit.GetMessage().GetDescription() != "" { - body = gogithub.String(commit.GetMessage().GetDescription()) + if prCommit.Message().Description() != "" { + body = gogithub.String(prCommit.Message().Description()) } // Create the Pull Request prPayload := &gogithub.NewPullRequest{ - Head: gogithub.String(info.Head), - Base: gogithub.String(info.Base), - Title: gogithub.String(commit.GetMessage().GetTitle()), + Head: gogithub.String(info.Target.DestBranch()), + Base: gogithub.String(prCommit.PullRequest().TargetBranch()), + Title: gogithub.String(prCommit.Message().Title()), Body: body, } logrus.Infof("GitHub PR payload: %+v", prPayload) @@ -122,8 +122,8 @@ func (c *prCreator) PostCommitHook(ctx context.Context, commit transactional.Com // If spec.GetMilestone() is set, fetch the ID of 
the milestone // Only set milestoneID to non-nil if specified var milestoneID *int - if len(prCommit.GetMilestone()) != 0 { - milestoneID, err = getMilestoneID(ctx, ghClient, owner, repo, prCommit.GetMilestone()) + if len(prCommit.PullRequest().Milestone()) != 0 { + milestoneID, err = getMilestoneID(ctx, ghClient, owner, repo, prCommit.PullRequest().Milestone()) if err != nil { return err } @@ -131,13 +131,13 @@ func (c *prCreator) PostCommitHook(ctx context.Context, commit transactional.Com // Only set assignees to non-nil if specified var assignees *[]string - if a := prCommit.GetAssignees(); len(a) != 0 { + if a := prCommit.PullRequest().Assignees(); len(a) != 0 { assignees = &a } // Only set labels to non-nil if specified var labels *[]string - if l := prCommit.GetLabels(); len(l) != 0 { + if l := prCommit.PullRequest().Labels(); len(l) != 0 { labels = &l } diff --git a/pkg/storage/client/transactional/distributed/git/gogit.go b/pkg/storage/client/transactional/distributed/git/gogit.go index 01ae34a7..719925ae 100644 --- a/pkg/storage/client/transactional/distributed/git/gogit.go +++ b/pkg/storage/client/transactional/distributed/git/gogit.go @@ -1,23 +1,7 @@ package git -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "github.com/fluxcd/go-git-providers/gitprovider" - git "github.com/go-git/go-git/v5" - "github.com/go-git/go-git/v5/config" - "github.com/go-git/go-git/v5/plumbing" - "github.com/go-git/go-git/v5/plumbing/object" - log "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/storage/client/transactional" - "k8s.io/apimachinery/pkg/util/sets" -) - -func NewGoGit(ctx context.Context, repoRef gitprovider.RepositoryRef, dir string, opts *Options) (Interface, error) { +/* +func NewGoGit(ctx context.Context, repoRef gitprovider.RepositoryRef, dir string, opts *Options) (*goGit, error) { gg := &goGit{ repoRef: repoRef, dir: dir, @@ -239,6 +223,26 @@ func (g *goGit) IsWorktreeClean(_ context.Context) (bool, error) { return 
s.IsClean(), nil } +func (g *goGit) fileAtCommit(_ context.Context, commit, file string) (*object.File, *object.Commit, error) { + c, err := g.repo.CommitObject(plumbing.NewHash(commit)) + if err != nil { + return nil, nil, err + } + f, err := c.File(file) + if err != nil { + return nil, nil, err + } + return f, c, nil +} + +func (g *goGit) Stat(ctx context.Context, commit, file string) (fs.FileInfo, error) { + f, c, err := g.fileAtCommit(ctx, commit, file) + if err != nil { + return nil, err + } + return &fileInfoWrapper{f, c.Committer.When}, nil +} + func (g *goGit) ReadFileAtCommit(_ context.Context, commit string, file string) ([]byte, error) { c, err := g.repo.CommitObject(plumbing.NewHash(commit)) if err != nil { @@ -276,3 +280,4 @@ func sameRevisionRefSpecs(revision string) []config.RefSpec { } return []config.RefSpec{config.RefSpec(revision)} } +*/ diff --git a/pkg/storage/client/transactional/distributed/git/gogit_test.go b/pkg/storage/client/transactional/distributed/git/gogit_test.go index 8797e16e..dbac532a 100644 --- a/pkg/storage/client/transactional/distributed/git/gogit_test.go +++ b/pkg/storage/client/transactional/distributed/git/gogit_test.go @@ -1,17 +1,18 @@ package git import ( - "context" - "fmt" - "io/ioutil" + "errors" + "io/fs" "os" - "reflect" - "strings" "testing" - - "github.com/fluxcd/go-git-providers/gitprovider" ) +func TestStat(t *testing.T) { + fi, err := os.Stat("nonexist.yaml") + t.Error(fi, err, errors.Is(err, fs.ErrNotExist)) +} + +/* type filesChangedSubTest struct { fromCommit string toCommit string @@ -86,7 +87,7 @@ func Test_goGit(t *testing.T) { } } -func Subtest_filesChanged(t *testing.T, g Interface, tests []filesChangedSubTest) { +func Subtest_filesChanged(t *testing.T, g *goGit, tests []filesChangedSubTest) { ctx := context.Background() for i, tt := range tests { t.Run(fmt.Sprintf("filesChanged_%d", i), func(t *testing.T) { @@ -102,7 +103,7 @@ func Subtest_filesChanged(t *testing.T, g Interface, tests 
[]filesChangedSubTest } } -func Subtest_readFiles(t *testing.T, g Interface, tests []readFileSubTest) { +func Subtest_readFiles(t *testing.T, g *goGit, tests []readFileSubTest) { ctx := context.Background() for i, tt := range tests { t.Run(fmt.Sprintf("readFiles_%d", i), func(t *testing.T) { @@ -122,3 +123,4 @@ func Subtest_readFiles(t *testing.T, g Interface, tests []readFileSubTest) { }) } } +*/ diff --git a/pkg/storage/client/transactional/distributed/git/interfaces.go b/pkg/storage/client/transactional/distributed/git/interfaces.go index 357dfcc0..732e72dc 100644 --- a/pkg/storage/client/transactional/distributed/git/interfaces.go +++ b/pkg/storage/client/transactional/distributed/git/interfaces.go @@ -1,13 +1,6 @@ package git -import ( - "context" - - "github.com/weaveworks/libgitops/pkg/storage/client/transactional" - "k8s.io/apimachinery/pkg/util/sets" -) - -type Interface interface { +/*type Interface interface { Pull(ctx context.Context) error Fetch(ctx context.Context, revision string) error Push(ctx context.Context, branchName string) error @@ -18,4 +11,4 @@ type Interface interface { IsWorktreeClean(ctx context.Context) (bool, error) ReadFileAtCommit(ctx context.Context, commit string, file string) ([]byte, error) CommitAt(ctx context.Context, branch string) (string, error) -} +}*/ diff --git a/pkg/storage/client/transactional/distributed/git/options.go b/pkg/storage/client/transactional/distributed/git/options.go index 2613ca1a..0f4ad5be 100644 --- a/pkg/storage/client/transactional/distributed/git/options.go +++ b/pkg/storage/client/transactional/distributed/git/options.go @@ -16,7 +16,7 @@ type Option interface { ApplyTo(*Options) } -func (o *Options) ApplyToTx(target *Options) { +func (o *Options) ApplyTo(target *Options) { if o.MainBranch != "" { target.MainBranch = o.MainBranch } diff --git a/pkg/storage/client/transactional/distributed/interfaces.go b/pkg/storage/client/transactional/distributed/interfaces.go index 15272b2e..31262667 100644 --- 
a/pkg/storage/client/transactional/distributed/interfaces.go +++ b/pkg/storage/client/transactional/distributed/interfaces.go @@ -5,16 +5,24 @@ import ( "time" "github.com/weaveworks/libgitops/pkg/storage/client/transactional" + "github.com/weaveworks/libgitops/pkg/storage/commit" ) +type Client interface { + GenericClient + + AtHash(commit.Hash) Client + AtRef(commit.Ref) Client +} + // Client is a client that can sync state with a remote in a transactional way. // // A distributed.Client is itself most likely both a CommitHook and TransactionHook; if so, // it should be automatically registered with the transactional.Client's *HookChain in the // distributed.Client's constructor. -type Client interface { +type GenericClient interface { // The distributed Client extends the transactional Client - transactional.Client + transactional.GenericClient // StartResyncLoop starts a resync loop for the given branches for // the given interval. @@ -23,7 +31,7 @@ type Client interface { // (remote Pulls) should be run in the background. The duration must // be positive, and non-zero. // - // resyncBranches specifies what branches to resync. The default is + // resync specifies what symbolic references to sync. The default is // []string{""}, i.e. only the "default" branch. // // ctx should be used to cancel the loop, if needed. @@ -33,7 +41,7 @@ type Client interface { // you need. The branches will be pulled synchronously in order. The // resync interval is non-sliding, which means that the interval // includes the time of the operations. 
- StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resyncBranches ...string) + StartResyncLoop(ctx context.Context, resyncCacheInterval time.Duration, resync ...commit.Ref) // Remote exposes the underlying remote used Remote() Remote diff --git a/pkg/storage/client/transactional/handlers.go b/pkg/storage/client/transactional/handlers.go index 90cd3a55..2890eb34 100644 --- a/pkg/storage/client/transactional/handlers.go +++ b/pkg/storage/client/transactional/handlers.go @@ -3,13 +3,12 @@ package transactional import ( "context" - "github.com/weaveworks/libgitops/pkg/storage/client/transactional/commit" + "github.com/weaveworks/libgitops/pkg/storage/commit" ) type TxInfo struct { - BaseCommit commit.Hash - HeadBranch string - Options TxOptions + Target commit.MutableTarget + Options TxOptions } type CommitHookChain interface { @@ -25,13 +24,13 @@ type CommitHookChain interface { type CommitHook interface { // PreCommitHook executes arbitrary logic for the given transaction info // and commit info; if an error is returned, the commit won't happen. - PreCommitHook(ctx context.Context, req commit.Request, info TxInfo) error + PreCommitHook(ctx context.Context, info TxInfo, req commit.Request) error // PostCommitHook executes arbitrary logic for the given transaction info // and commit info; if an error is returned, the commit will happen in the // case of a BranchTx on the head branch; but the transaction itself will // fail. In the case of a "normal" transaction; the commit will be made, // but later rolled back. 
- PostCommitHook(ctx context.Context, req commit.Request, info TxInfo) error + PostCommitHook(ctx context.Context, info TxInfo, req commit.Request) error } var _ CommitHookChain = &MultiCommitHook{} @@ -45,24 +44,24 @@ func (m *MultiCommitHook) Register(h CommitHook) { m.CommitHooks = append(m.CommitHooks, h) } -func (m *MultiCommitHook) PreCommitHook(ctx context.Context, req commit.Request, info TxInfo) error { +func (m *MultiCommitHook) PreCommitHook(ctx context.Context, info TxInfo, req commit.Request) error { for _, ch := range m.CommitHooks { if ch == nil { continue } - if err := ch.PreCommitHook(ctx, req, info); err != nil { + if err := ch.PreCommitHook(ctx, info, req); err != nil { return err } } return nil } -func (m *MultiCommitHook) PostCommitHook(ctx context.Context, req commit.Request, info TxInfo) error { +func (m *MultiCommitHook) PostCommitHook(ctx context.Context, info TxInfo, req commit.Request) error { for _, ch := range m.CommitHooks { if ch == nil { continue } - if err := ch.PostCommitHook(ctx, req, info); err != nil { + if err := ch.PostCommitHook(ctx, info, req); err != nil { return err } } diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go index 79b0cac5..d0f979c6 100644 --- a/pkg/storage/client/transactional/interfaces.go +++ b/pkg/storage/client/transactional/interfaces.go @@ -4,15 +4,21 @@ import ( "context" "github.com/weaveworks/libgitops/pkg/storage/client" - "github.com/weaveworks/libgitops/pkg/storage/client/transactional/commit" + "github.com/weaveworks/libgitops/pkg/storage/commit" "github.com/weaveworks/libgitops/pkg/storage/core" ) type Client interface { - client.Reader + GenericClient + AtHash(commit.Hash) Client AtRef(commit.Ref) Client - AtSymbolicRef(string) Client +} + +type GenericClient interface { + client.Reader + + CurrentHash() (commit.Hash, error) CurrentRef() commit.Ref TransactionManager() TransactionManager @@ -47,8 +53,8 @@ type TransactionManager interface 
{ Abort(ctx context.Context, tx *TxInfo) error - RefResolver() commit.RefResolver - CommitResolver() commit.Resolver + //RefResolver() commit.RefResolver + //CommitResolver() commit.Resolver // CreateBranch creates a new branch with the given target branch name. It forks out // of the branch specified in the context. diff --git a/pkg/storage/client/transactional/tx.go b/pkg/storage/client/transactional/tx.go index 5b13d2e3..b2380f43 100644 --- a/pkg/storage/client/transactional/tx.go +++ b/pkg/storage/client/transactional/tx.go @@ -1,6 +1,6 @@ package transactional -import "github.com/weaveworks/libgitops/pkg/storage/client/transactional/commit" +import "github.com/weaveworks/libgitops/pkg/storage/commit" type txImpl struct { *txCommon diff --git a/pkg/storage/client/transactional/tx_common.go b/pkg/storage/client/transactional/tx_common.go index 42557a80..0cab3044 100644 --- a/pkg/storage/client/transactional/tx_common.go +++ b/pkg/storage/client/transactional/tx_common.go @@ -4,7 +4,7 @@ import ( "context" "github.com/weaveworks/libgitops/pkg/storage/client" - "github.com/weaveworks/libgitops/pkg/storage/client/transactional/commit" + "github.com/weaveworks/libgitops/pkg/storage/commit" utilerrs "k8s.io/apimachinery/pkg/util/errors" ) @@ -35,7 +35,7 @@ func (tx *txCommon) Abort(err error) error { func (tx *txCommon) handlePreCommit(c commit.Request) txFunc { return func() error { - return tx.commitHook.PreCommitHook(tx.ctx, c, tx.info) + return tx.commitHook.PreCommitHook(tx.ctx, tx.info, c) } } @@ -47,7 +47,7 @@ func (tx *txCommon) commit(c commit.Request) txFunc { func (tx *txCommon) handlePostCommit(c commit.Request) txFunc { return func() error { - return tx.commitHook.PostCommitHook(tx.ctx, c, tx.info) + return tx.commitHook.PostCommitHook(tx.ctx, tx.info, c) } } diff --git a/pkg/storage/commit/commit.go b/pkg/storage/commit/commit.go new file mode 100644 index 00000000..4c3120fd --- /dev/null +++ b/pkg/storage/commit/commit.go @@ -0,0 +1,257 @@ +package 
commit + +import ( + "context" + "encoding/hex" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/uuid" +) + +/*type Resolver interface { + ResolveHash(Hash) (Commit, error) +} + +type Commit interface { + Hash() Hash + Author() Signature + Message() Message + Parents() []Hash +}*/ + +type Request interface { + Author() Signature + Message() Message + Validate() error +} + +type Signature interface { + // Name describes the author's name (e.g. as per git config) + // +required + Name() string + // Email describes the author's email (e.g. as per git config). + // It is optional generally, but might be required by some specific + // implementations. + // +optional + Email() string + // When is the timestamp of the signature. + // +optional + When() *time.Time + // The String() method must return a (ideally both human- and machine- + // readable) concatenated string including the name and email (if + // applicable) of the author. + fmt.Stringer +} + +type Message interface { + // Title describes the change concisely, so it can be used e.g. as + // a commit message or PR title. Certain implementations might enforce + // character limits on this string. + // +required + Title() string + // Description contains optional extra, more detailed information + // about the change. + // +optional + Description() string + // The String() method must return a (ideally both human- and machine- + // readable) concatenated string including the title and description + // (if applicable) of the author. + fmt.Stringer +} + +// Hash represents an immutable commit hash, represented as a set of "raw" bytes, +// probably from some hash function (e.g. SHA-1 or SHA-2-256), along with a well-defined +// string representation, e.g. Hexadecimal encoding. +type Hash interface { + Hash() []byte + // TODO: Rename to encoded and keep fmt.Stringer a debug print? + String() string + + // RefSource returns the source of this computed Hash lock. 
Can be nil, + // in case this doesn't have a symbolic source. This can be used for consumers + // to understand how this immutable revision was computed. + // TODO: Do we need this? + // RefSource() Ref +} + +func WithHash(ctx context.Context, h Hash) context.Context { + if h == nil { + return ctx + } + return context.WithValue(ctx, hashCtxKey, h) +} + +func GetHash(ctx context.Context) (Hash, bool) { + h, ok := ctx.Value(hashCtxKey).(Hash) + return h, ok +} + +type hashCtxKeyStruct struct{} + +var hashCtxKey = hashCtxKeyStruct{} + +type RefResolver interface { + ResolveRef(Ref) (Hash, error) + // GetRef extracts the Ref from the context, and if empty, + // defaults it to the default Ref. + GetRef(ctx context.Context) Ref +} + +func SHA1(h [20]byte, src Ref) Hash { + b := make([]byte, 20) + copy(b, h[:]) + return &hash{hash: b, encoded: hex.EncodeToString(b), src: src} +} + +func SHA1String(h string, src Ref) (Hash, bool) { + b, err := hex.DecodeString(h) + if err != nil { + return nil, false + } + return &hash{hash: b, encoded: h, src: src}, true +} + +func At(symbolic string) Ref { + return &symbolicRef{RefTypeUnknown, symbolic, 0} +} + +func Default() Ref { + return AtBranch("") // Signifies the default branch +} + +func AtBranch(b string) Ref { + return Before(b, 0) +} + +func Before(b string, n uint8) Ref { + return &symbolicRef{RefTypeBranch, b, n} +} + +func AtTag(t string) Ref { + return &symbolicRef{RefTypeTag, t, 0} +} + +func AtHash(h string) Ref { + return &symbolicRef{RefTypeHash, h, 0} +} + +type RefType int + +func (t RefType) String() string { + switch t { + case RefTypeUnknown: + return "unknown" + case RefTypeHash: + return "hash" + case RefTypeBranch: + return "branch" + case RefTypeTag: + return "tag" + default: + return fmt.Sprintf("<unknown RefType %d>", t) + } +} + +const ( + RefTypeUnknown RefType = iota + RefTypeHash + // A branch is generally a mutable reference + RefTypeBranch + RefTypeTag +) + +type Ref interface { + Resolve(RefResolver) (Hash, error) + + // TODO:
Keep fmt.Stringer for debug printing, rename to Target() string? + Target() string + Type() RefType + Before() uint8 +} + +func WithRef(ctx context.Context, s Ref) context.Context { + if s == nil { + return ctx + } + return context.WithValue(ctx, symbolicCtxKey, s) +} + +func GetRef(ctx context.Context) (Ref, bool) { + s, ok := ctx.Value(symbolicCtxKey).(Ref) + return s, ok +} + +type symbolicCtxKeyStruct struct{} + +var symbolicCtxKey = symbolicCtxKeyStruct{} + +type hash struct { + hash []byte + encoded string + src Ref +} + +func (h *hash) Hash() []byte { return h.hash } +func (h *hash) String() string { return h.encoded } +func (h *hash) RefSource() Ref { return h.src } + +type symbolicRef struct { + st RefType + ref string + before uint8 +} + +func (r *symbolicRef) Target() string { return r.ref } +func (r *symbolicRef) Before() uint8 { return r.before } +func (r *symbolicRef) Type() RefType { return r.st } +func (r *symbolicRef) Resolve(res RefResolver) (Hash, error) { + // TODO: This is probably resolver-specific + if r.before != 0 && r.st != RefTypeUnknown && r.st != RefTypeBranch { + return nil, errors.New("setting Before() only works for branches") + } + return res.ResolveRef(r) +} + +type MutableTarget interface { + // The branch to which the resulting commit from the transaction + // is added. 
+ DestBranch() string + + BaseCommit() Hash + UUID() types.UID + + // TODO: Implement fmt.Stringer for debug printing +} + +func NewMutableTarget(headBranch string, baseCommit Hash) MutableTarget { + return &mutableTarget{headBranch: headBranch, baseCommit: baseCommit, uuid: uuid.NewUUID()} +} + +type mutableTarget struct { + headBranch string + baseCommit Hash + uuid types.UID +} + +func (m *mutableTarget) DestBranch() string { return m.headBranch } +func (m *mutableTarget) BaseCommit() Hash { return m.baseCommit } +func (m *mutableTarget) UUID() types.UID { return m.uuid } + +func WithMutableTarget(ctx context.Context, m MutableTarget) context.Context { + if m == nil { + return ctx + } + return context.WithValue(ctx, mutableCtxKey, m) +} + +func GetMutableTarget(ctx context.Context) (MutableTarget, bool) { + mt, ok := ctx.Value(mutableCtxKey).(MutableTarget) + return mt, ok +} + +type mutableCtxKeyStruct struct{} + +var mutableCtxKey = mutableCtxKeyStruct{} diff --git a/pkg/storage/commit/pr/pull_request.go b/pkg/storage/commit/pr/pull_request.go new file mode 100644 index 00000000..589b6988 --- /dev/null +++ b/pkg/storage/commit/pr/pull_request.go @@ -0,0 +1,28 @@ +package pr + +import "github.com/weaveworks/libgitops/pkg/storage/commit" + +// Request can be returned when committing a transaction instead of a +// commit.Request, if the intention is to create a PR in e.g. GitHub. +type Request interface { + // PullRequest is a superset of commit.Request + commit.Request + PullRequest() Metadata +} + +type Metadata interface { + // TargetBranch specifies what branch the Pull Request head branch should + // be merged into. + // +required + TargetBranch() string + // Labels specifies what labels should be applied on the PR. + // +optional + Labels() []string + // Assignees specifies what user login names should be assigned to this PR. + // Note: Only users with "pull" access or more can be assigned. 
+ // +optional + Assignees() []string + // Milestone specifies what milestone this should be attached to. + // +optional + Milestone() string +} diff --git a/pkg/storage/commit/request.go b/pkg/storage/commit/request.go new file mode 100644 index 00000000..573e315d --- /dev/null +++ b/pkg/storage/commit/request.go @@ -0,0 +1,59 @@ +package commit + +import ( + "fmt" + "time" + + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +var _ Request = GenericRequest{} + +type GenericRequest struct { + Name string + Email string + When *time.Time + Title string + Description string +} + +func (r GenericRequest) Author() Signature { + return &signature{&r.Name, &r.Email, r.When} +} +func (r GenericRequest) Message() Message { + return &message{&r.Title, &r.Description} +} +func (r GenericRequest) Validate() error { + root := field.NewPath("commit.GenericRequest") + allErrs := field.ErrorList{} + if len(r.Name) == 0 { + allErrs = append(allErrs, field.Required(root.Child("Name"), validation.EmptyError())) + } + // TODO: Should this be optional or not? 
+ if len(r.Email) == 0 { + allErrs = append(allErrs, field.Required(root.Child("Email"), validation.EmptyError())) + } + if len(r.Title) == 0 { + allErrs = append(allErrs, field.Required(root.Child("Title"), validation.EmptyError())) + } + return allErrs.ToAggregate() +} + +type signature struct { + name, email *string + when *time.Time +} + +func (s *signature) Name() string { return *s.name } +func (s *signature) Email() string { return *s.email } +func (s *signature) When() *time.Time { return s.when } +func (s *signature) String() string { return fmt.Sprintf("%s <%s>", s.Name(), s.Email()) } + +type message struct { + title, desc *string +} + +func (m *message) Title() string { return *m.title } +func (m *message) Description() string { return *m.desc } +func (m *message) String() string { return fmt.Sprintf("%s\n\n%s", m.Title(), m.Description()) } diff --git a/pkg/storage/event/event.go b/pkg/storage/event/event.go index 3967b2dc..9a6ee903 100644 --- a/pkg/storage/event/event.go +++ b/pkg/storage/event/event.go @@ -1,11 +1,6 @@ package event -import ( - "fmt" - - "github.com/weaveworks/libgitops/pkg/storage/core" -) - +/* // ObjectEventType is an enum describing a change in an Object's state. type ObjectEventType byte @@ -52,3 +47,4 @@ type ObjectEvent struct { // ObjectEventStream is a channel of ObjectEvents type ObjectEventStream chan *ObjectEvent +*/ diff --git a/pkg/storage/event/interfaces.go b/pkg/storage/event/interfaces.go index f72a9cf6..112634fb 100644 --- a/pkg/storage/event/interfaces.go +++ b/pkg/storage/event/interfaces.go @@ -5,6 +5,7 @@ import ( "io" "github.com/weaveworks/libgitops/pkg/storage" + "k8s.io/apimachinery/pkg/watch" ) // EventStorage is the abstract combination of a normal Storage, and @@ -20,7 +21,9 @@ type Storage interface { // limit large enough to not block normal operation. An error might // be returned if a maximum amount of watches has been opened already, // e.g. ErrTooManyWatches. 
- WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error + //WatchForObjectEvents(ctx context.Context, into ObjectEventStream) error + + Watch(ctx context.Context) (watch.Interface, error) // Close closes the EventStorage and underlying resources gracefully. io.Closer diff --git a/pkg/storage/filesystem/dir_traversal.go b/pkg/storage/filesystem/dir_traversal.go index 31ec7f1d..b5be448b 100644 --- a/pkg/storage/filesystem/dir_traversal.go +++ b/pkg/storage/filesystem/dir_traversal.go @@ -2,20 +2,21 @@ package filesystem import ( "context" - "os" + "io/fs" ) // ListValidFilesInFilesystem discovers files in the given Filesystem that has a // ContentType that contentTyper recognizes, and is not a path that is excluded by // pathExcluder. -func ListValidFilesInFilesystem(ctx context.Context, fs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder) (files []string, err error) { - err = fs.Walk(ctx, "", func(path string, info os.FileInfo, err error) error { +func ListValidFilesInFilesystem(ctx context.Context, givenFs Filesystem, contentTyper ContentTyper, pathExcluder PathExcluder) (files []string, err error) { + fsys := givenFs.WithContext(ctx) + err = fs.WalkDir(fsys, "", func(path string, d fs.DirEntry, err error) error { if err != nil { return err } // Only include valid files - if !info.IsDir() && IsValidFileInFilesystem(ctx, fs, contentTyper, pathExcluder, path) { + if !d.IsDir() && IsValidFileInFilesystem(ctx, givenFs, contentTyper, pathExcluder, path) { files = append(files, path) } return nil diff --git a/pkg/storage/filesystem/fileevents/events.go b/pkg/storage/filesystem/fileevents/events.go index 4c4e09a4..09822c4a 100644 --- a/pkg/storage/filesystem/fileevents/events.go +++ b/pkg/storage/filesystem/fileevents/events.go @@ -28,6 +28,8 @@ func (e FileEventType) String() string { // FileEvent describes a file change of a certain kind at a certain // (relative) path. Often emitted by FileEventsEmitter. 
type FileEvent struct { + // TODO: Include some kind of commit.Hash here that is optional? + // TODO: Make this an interface? Path string Type FileEventType diff --git a/pkg/storage/filesystem/fileevents/interfaces.go b/pkg/storage/filesystem/fileevents/interfaces.go index 5a72e97d..ea566f2d 100644 --- a/pkg/storage/filesystem/fileevents/interfaces.go +++ b/pkg/storage/filesystem/fileevents/interfaces.go @@ -33,6 +33,8 @@ type Emitter interface { // Suspend blocks the next event dispatch for this given path. Useful // for not sending "your own" modification events into the // FileEventStream that is listening. path is relative. + // TODO: Should this be handled at this level, or should the "figure out + // what is my own changes" be handled at higher levels in the stack? Suspend(ctx context.Context, path string) // Close closes the emitter gracefully. diff --git a/pkg/storage/filesystem/filefinder_simple.go b/pkg/storage/filesystem/filefinder_simple.go index 2cad315e..b1a2b167 100644 --- a/pkg/storage/filesystem/filefinder_simple.go +++ b/pkg/storage/filesystem/filefinder_simple.go @@ -2,6 +2,7 @@ package filesystem import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -27,9 +28,9 @@ func NewSimpleStorage(dir string, namespacer storage.Namespacer, opts SimpleFile return NewGeneric(fileFinder, namespacer) } -func NewSimpleFileFinder(fs Filesystem, opts SimpleFileFinderOptions) (*SimpleFileFinder, error) { - if fs == nil { - return nil, fmt.Errorf("NewSimpleFileFinder: fs is mandatory") +func NewSimpleFileFinder(fsimpl Filesystem, opts SimpleFileFinderOptions) (*SimpleFileFinder, error) { + if fsimpl == nil { + return nil, fmt.Errorf("NewSimpleFileFinder: fsimpl is mandatory") } ct := content.ContentTypeJSON if len(opts.ContentType) != 0 { @@ -40,7 +41,7 @@ func NewSimpleFileFinder(fs Filesystem, opts SimpleFileFinderOptions) (*SimpleFi resolver = opts.FileExtensionResolver } return &SimpleFileFinder{ - fs: fs, + fsimpl: fsimpl, opts: opts, contentTyper: 
StaticContentTyper{ContentType: ct}, resolver: resolver, @@ -83,7 +84,7 @@ var _ FileFinder = &SimpleFileFinder{} // // This FileFinder does not support the ObjectAt method. type SimpleFileFinder struct { - fs Filesystem + fsimpl Filesystem opts SimpleFileFinderOptions contentTyper StaticContentTyper resolver FileExtensionResolver @@ -101,7 +102,7 @@ type SimpleFileFinderOptions struct { } func (f *SimpleFileFinder) Filesystem() Filesystem { - return f.fs + return f.fsimpl } func (f *SimpleFileFinder) ContentTyper() ContentTyper { @@ -167,14 +168,15 @@ func (f *SimpleFileFinder) ListGroupKinds(ctx context.Context) ([]core.GroupKind } // List groups at top-level - groups, err := readDir(ctx, f.fs, "") + ctxFs := f.fsimpl.WithContext(ctx) + groups, err := readDir(ctxFs, "") if err != nil { return nil, err } // For all groups; also list all kinds, and add to the following list groupKinds := []core.GroupKind{} for _, group := range groups { - kinds, err := readDir(ctx, f.fs, group) + kinds, err := readDir(ctxFs, group) if err != nil { return nil, err } @@ -196,7 +198,8 @@ func (f *SimpleFileFinder) ListGroupKinds(ctx context.Context) ([]core.GroupKind // different namespaces that have been set on any object belonging to // the given GroupKind. 
func (f *SimpleFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind) (sets.String, error) { - entries, err := readDir(ctx, f.fs, f.kindKeyPath(gk)) + ctxFs := f.fsimpl.WithContext(ctx) + entries, err := readDir(ctxFs, f.kindKeyPath(gk)) if err != nil { return nil, err } @@ -211,7 +214,8 @@ func (f *SimpleFileFinder) ListNamespaces(ctx context.Context, gk core.GroupKind func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, namespace string) (core.UnversionedObjectIDSet, error) { // If namespace is empty, the names will be in ./, otherwise .// namesDir := filepath.Join(f.kindKeyPath(gk), namespace) - entries, err := readDir(ctx, f.fs, namesDir) + ctxFs := f.fsimpl.WithContext(ctx) + entries, err := readDir(ctxFs, namesDir) if err != nil { return nil, err } @@ -228,7 +232,8 @@ func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, // If f.SubDirectoryFileName != "", the file names already match .metadata.name // Make sure the metadata file ./<.metadata.name>/. 
actually exists expectedPath := filepath.Join(namesDir, entry, f.opts.SubDirectoryFileName+ext) - if exists, _ := f.fs.Exists(ctx, expectedPath); !exists { + + if exists, _ := Exists(ctxFs, expectedPath); !exists { continue } } else { @@ -246,9 +251,9 @@ func (f *SimpleFileFinder) ListObjectIDs(ctx context.Context, gk core.GroupKind, return ids, nil } -func readDir(ctx context.Context, fs Filesystem, dir string) ([]string, error) { - fi, err := fs.Stat(ctx, dir) - if os.IsNotExist(err) { +func readDir(ctxFs FS, dir string) ([]string, error) { + fi, err := ctxFs.Stat(dir) + if errors.Is(err, os.ErrNotExist) { // It's ok if the directory doesn't exist (yet), we just don't have any items then :) return nil, nil } else if !fi.IsDir() { @@ -257,7 +262,7 @@ func readDir(ctx context.Context, fs Filesystem, dir string) ([]string, error) { } // When we know that path is a directory, go ahead and read it - entries, err := fs.ReadDir(ctx, dir) + entries, err := ctxFs.ReadDir(dir) if err != nil { return nil, err } diff --git a/pkg/storage/filesystem/filesystem.go b/pkg/storage/filesystem/filesystem.go index 2d21f9c6..f0ecd0c8 100644 --- a/pkg/storage/filesystem/filesystem.go +++ b/pkg/storage/filesystem/filesystem.go @@ -2,40 +2,33 @@ package filesystem import ( "context" + "errors" + "io/fs" "os" - "path/filepath" "strconv" "github.com/spf13/afero" - "github.com/weaveworks/libgitops/pkg/storage/core" + "github.com/weaveworks/libgitops/pkg/storage/commit" ) -// Filesystem extends afero.Fs and afero.Afero with contexts added to every method. type Filesystem interface { + WithContext(ctx context.Context) FS + RefResolver() commit.RefResolver +} - // Members of afero.Fs +type FS interface { + fs.StatFS + fs.ReadDirFS + fs.ReadFileFS // MkdirAll creates a directory path and all parents that does not exist // yet. 
- MkdirAll(ctx context.Context, path string, perm os.FileMode) error + MkdirAll(path string, perm os.FileMode) error // Remove removes a file identified by name, returning an error, if any // happens. - Remove(ctx context.Context, name string) error - // Stat returns a FileInfo describing the named file, or an error, if any - // happens. - Stat(ctx context.Context, name string) (os.FileInfo, error) - - // Members of afero.Afero - - ReadDir(ctx context.Context, dirname string) ([]os.FileInfo, error) - - Exists(ctx context.Context, path string) (bool, error) + Remove(name string) error - ReadFile(ctx context.Context, filename string) ([]byte, error) - - WriteFile(ctx context.Context, filename string, data []byte, perm os.FileMode) error - - Walk(ctx context.Context, root string, walkFn filepath.WalkFunc) error + WriteFile(filename string, data []byte, perm os.FileMode) error // Custom methods @@ -49,85 +42,121 @@ type Filesystem interface { // file content, or the latest Git commit when the file was // changed. // - // os.IsNotExist(err) can be used to check if the file doesn't - // exist. - Checksum(ctx context.Context, filename string) (string, error) + // Like Stat(filename), os.ErrNotExist is returned if the file does + // not exist, such that errors.Is(err, os.ErrNotExist) can be used + // to check. + Checksum(filename string) (string, error) // RootDirectory specifies where on disk the root directory is stored. // This path MUST be absolute. All other paths for the other methods // MUST be relative to this directory. - RootDirectory() string - - VersionRefResolver() core.VersionRefResolver + //RootDirectory() (string, error) } -// NewOSFilesystem creates a new afero.OsFs for the local directory, using -// NewFilesystem underneath. 
-func NewOSFilesystem(rootDir string) Filesystem { - return NewFilesystem(afero.NewOsFs(), rootDir) +type ContextFS interface { + Open(ctx context.Context, name string) (fs.File, error) + Stat(ctx context.Context, name string) (fs.FileInfo, error) + ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) + ReadFile(ctx context.Context, name string) ([]byte, error) + MkdirAll(ctx context.Context, path string, perm os.FileMode) error + Remove(ctx context.Context, name string) error + WriteFile(ctx context.Context, filename string, data []byte, perm os.FileMode) error + Checksum(ctx context.Context, filename string) (string, error) + //RootDirectory(ctx context.Context) (string, error) } -// NewFilesystem wraps an underlying afero.Fs without context knowledge, -// in a Filesystem-compliant implementation; scoped at the given directory -// (i.e. wrapped in afero.NewBasePathFs(fs, rootDir)). -// -// Checksum is calculated based on the modification timestamp of the file. -func NewFilesystem(fs afero.Fs, rootDir string) Filesystem { - // TODO: rootDir validation? It must be absolute, exist, and be a directory. - return &filesystem{afero.NewBasePathFs(fs, rootDir), rootDir} +// Exists uses the ctxFs.Stat() method to check whether the file exists. +// If os.ErrNotExist is returned from the stat call, the return value is +// false, nil. If another error occurred, then false, err is returned. +// If err == nil, then true, nil is returned. 
+func Exists(ctxFs FS, name string) (bool, error) { + _, err := ctxFs.Stat(name) + if errors.Is(err, os.ErrNotExist) { + return false, nil + } + if err != nil { + return false, err + } + return true, nil } -type filesystem struct { - fs afero.Fs - rootDir string +func FromContext(ctxFs ContextFS) Filesystem { + return &fromCtxFs{ctxFs} } -func (f *filesystem) RootDirectory() string { - return f.rootDir +type fromCtxFs struct { + ctxFs ContextFS } -func (f *filesystem) Checksum(ctx context.Context, filename string) (string, error) { - fi, err := f.Stat(ctx, filename) - if err != nil { - return "", err - } - return checksumFromFileInfo(fi), nil +func (f *fromCtxFs) WithContext(ctx context.Context) FS { + return &fromCtxFsMapper{f, ctx} } -func (f *filesystem) MkdirAll(_ context.Context, path string, perm os.FileMode) error { - return f.fs.MkdirAll(path, perm) +type fromCtxFsMapper struct { + *fromCtxFs + ctx context.Context } -func (f *filesystem) Remove(_ context.Context, name string) error { - return f.fs.Remove(name) +func (f *fromCtxFsMapper) Open(name string) (fs.File, error) { + return f.ctxFs.Open(f.ctx, name) } - -func (f *filesystem) Stat(_ context.Context, name string) (os.FileInfo, error) { - return f.fs.Stat(name) +func (f *fromCtxFsMapper) Stat(name string) (fs.FileInfo, error) { + return f.ctxFs.Stat(f.ctx, name) } - -func (f *filesystem) ReadDir(_ context.Context, dirname string) ([]os.FileInfo, error) { - return afero.ReadDir(f.fs, dirname) +func (f *fromCtxFsMapper) ReadDir(name string) ([]fs.DirEntry, error) { + return f.ctxFs.ReadDir(f.ctx, name) +} +func (f *fromCtxFsMapper) ReadFile(name string) ([]byte, error) { + return f.ctxFs.ReadFile(f.ctx, name) +} +func (f *fromCtxFsMapper) MkdirAll(path string, perm os.FileMode) error { + return f.ctxFs.MkdirAll(f.ctx, path, perm) +} +func (f *fromCtxFsMapper) Remove(name string) error { + return f.ctxFs.Remove(f.ctx, name) +} +func (f *fromCtxFsMapper) WriteFile(filename string, data []byte, perm 
os.FileMode) error { + return f.ctxFs.WriteFile(f.ctx, filename, data, perm) +} +func (f *fromCtxFsMapper) Checksum(filename string) (string, error) { + return f.ctxFs.Checksum(f.ctx, filename) } -func (f *filesystem) Exists(_ context.Context, path string) (bool, error) { - return afero.Exists(f.fs, path) +// NewOSFilesystem creates a new afero.OsFs for the local directory, using +// NewFilesystem underneath. +func NewOSFilesystem(rootDir string) Filesystem { + return FilesystemFromAfero(afero.NewOsFs()) } -func (f *filesystem) ReadFile(_ context.Context, filename string) ([]byte, error) { - return afero.ReadFile(f.fs, filename) +// NewFilesystem wraps an underlying afero.Fs without context knowledge, +// in a Filesystem-compliant implementation; scoped at the given directory +// (i.e. wrapped in afero.NewBasePathFs(fs, rootDir)). +// +// Checksum is calculated based on the modification timestamp of the file. +func FilesystemFromAfero(fs afero.Fs) Filesystem { + // TODO: rootDir validation? It must be absolute, exist, and be a directory. 
+ return &nopCtx{&filesystem{afero.NewIOFS(fs)}} } -func (f *filesystem) WriteFile(_ context.Context, filename string, data []byte, perm os.FileMode) error { - return afero.WriteFile(f.fs, filename, data, perm) +type nopCtx struct { + fs FS } -func (f *filesystem) Walk(_ context.Context, root string, walkFn filepath.WalkFunc) error { - return afero.Walk(f.fs, root, walkFn) +func (c *nopCtx) WithContext(context.Context) FS { return c.fs } + +type filesystem struct { + afero.IOFS } -func (f *filesystem) VersionRefResolver() core.VersionRefResolver { - return nil +func (f *filesystem) WriteFile(filename string, data []byte, perm os.FileMode) error { + return afero.WriteFile(f.IOFS.Fs, filename, data, perm) +} +func (f *filesystem) Checksum(filename string) (string, error) { + fi, err := f.Stat(filename) + if err != nil { + return "", err + } + return checksumFromFileInfo(fi), nil } func checksumFromFileInfo(fi os.FileInfo) string { diff --git a/pkg/storage/filesystem/storage.go b/pkg/storage/filesystem/storage.go index 8aeb2fd1..df9c694b 100644 --- a/pkg/storage/filesystem/storage.go +++ b/pkg/storage/filesystem/storage.go @@ -2,12 +2,14 @@ package filesystem import ( "context" + "errors" "fmt" "os" "path/filepath" "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/storage" + "github.com/weaveworks/libgitops/pkg/storage/commit" "github.com/weaveworks/libgitops/pkg/storage/core" "k8s.io/apimachinery/pkg/util/sets" ) @@ -44,8 +46,8 @@ func (r *Generic) FileFinder() FileFinder { return r.fileFinder } -func (r *Generic) VersionRefResolver() core.VersionRefResolver { - return r.fileFinder.Filesystem().VersionRefResolver() +func (r *Generic) RefResolver() commit.RefResolver { + return r.fileFinder.Filesystem().RefResolver() } func (r *Generic) Read(ctx context.Context, id core.UnversionedObjectID) ([]byte, error) { @@ -55,25 +57,32 @@ func (r *Generic) Read(ctx context.Context, id core.UnversionedObjectID) ([]byte return nil, err } // Check 
if the resource indicated by key exists - if !r.exists(ctx, p) { + exists, err := r.exists(ctx, p) + if err != nil { + return nil, err + } + if !exists { return nil, core.NewErrNotFound(id) } // Read the file - return r.FileFinder().Filesystem().ReadFile(ctx, p) + return r.FileFinder().Filesystem().WithContext(ctx).ReadFile(p) } -func (r *Generic) Exists(ctx context.Context, id core.UnversionedObjectID) bool { +func (r *Generic) Exists(ctx context.Context, id core.UnversionedObjectID) (bool, error) { // Get the path and verify namespacing info p, err := r.getPath(ctx, id) if err != nil { - return false + return false, err } return r.exists(ctx, p) } -func (r *Generic) exists(ctx context.Context, path string) bool { - exists, _ := r.FileFinder().Filesystem().Exists(ctx, path) - return exists +func (r *Generic) fsFor(ctx context.Context) FS { + return r.FileFinder().Filesystem().WithContext(ctx) +} + +func (r *Generic) exists(ctx context.Context, path string) (bool, error) { + return Exists(r.fsFor(ctx), path) } func (r *Generic) Checksum(ctx context.Context, id core.UnversionedObjectID) (string, error) { @@ -83,10 +92,11 @@ func (r *Generic) Checksum(ctx context.Context, id core.UnversionedObjectID) (st return "", err } // Return a "high level" error if the file does not exist - checksum, err := r.FileFinder().Filesystem().Checksum(ctx, p) - if os.IsNotExist(err) { + checksum, err := r.fsFor(ctx).Checksum(p) + if errors.Is(err, os.ErrNotExist) { return "", core.NewErrNotFound(id) - } else if err != nil { + } + if err != nil { return "", err } return checksum, nil @@ -98,6 +108,7 @@ func (r *Generic) ContentType(ctx context.Context, id core.UnversionedObjectID) if err != nil { return "", err } + // The object doesn't necessarily need to exist return r.FileFinder().ContentTyper().ContentTypeForPath(ctx, r.fileFinder.Filesystem(), p) } @@ -109,13 +120,17 @@ func (r *Generic) Write(ctx context.Context, id core.UnversionedObjectID, conten } // Create the underlying 
directories if they do not exist already - if !r.exists(ctx, p) { - if err := r.FileFinder().Filesystem().MkdirAll(ctx, filepath.Dir(p), 0755); err != nil { + exists, err := r.exists(ctx, p) + if err != nil { + return err + } + if !exists { + if err := r.fsFor(ctx).MkdirAll(filepath.Dir(p), 0755); err != nil { return err } } // Write the file content - return r.FileFinder().Filesystem().WriteFile(ctx, p, content, 0664) + return r.fsFor(ctx).WriteFile(p, content, 0664) } func (r *Generic) Delete(ctx context.Context, id core.UnversionedObjectID) error { @@ -126,11 +141,15 @@ func (r *Generic) Delete(ctx context.Context, id core.UnversionedObjectID) error } // Check if the resource indicated by key exists - if !r.exists(ctx, p) { + exists, err := r.exists(ctx, p) + if err != nil { + return err + } + if !exists { return core.NewErrNotFound(id) } // Remove the file - return r.FileFinder().Filesystem().Remove(ctx, p) + return r.fsFor(ctx).Remove(p) } // ListGroupKinds returns all known GroupKinds by the implementation at that diff --git a/pkg/storage/filesystem/unstructured/btree/btree_versioned_index.go b/pkg/storage/filesystem/unstructured/btree/btree_versioned_index.go index fb133e39..13eca115 100644 --- a/pkg/storage/filesystem/unstructured/btree/btree_versioned_index.go +++ b/pkg/storage/filesystem/unstructured/btree/btree_versioned_index.go @@ -12,6 +12,14 @@ var ( ErrVersionRefAlreadyExists = errors.New("version ref tree already exists") ) +/* + New Commit Event: + -> UnstructuredStorage.Sync(ctx), where ctx has + +*/ + +// VersionedIndex represents a set of Indexes that are built as copy-on-write +// extensions on top of each other. 
type VersionedIndex interface { VersionedTree(ref string) (Index, bool) NewVersionedTree(ref, base string) (Index, error) diff --git a/pkg/storage/filesystem/unstructured/event/storage.go b/pkg/storage/filesystem/unstructured/event/storage.go index c060e578..53a64bf6 100644 --- a/pkg/storage/filesystem/unstructured/event/storage.go +++ b/pkg/storage/filesystem/unstructured/event/storage.go @@ -6,7 +6,7 @@ import ( gosync "sync" "github.com/sirupsen/logrus" - "github.com/weaveworks/libgitops/pkg/serializer" + "github.com/weaveworks/libgitops/pkg/frame" "github.com/weaveworks/libgitops/pkg/storage" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/event" @@ -15,6 +15,7 @@ import ( "github.com/weaveworks/libgitops/pkg/storage/filesystem/fileevents/inotify" "github.com/weaveworks/libgitops/pkg/storage/filesystem/unstructured" "github.com/weaveworks/libgitops/pkg/util/sync" + "k8s.io/apimachinery/pkg/watch" ) // Storage is a union of unstructured.Storage and fileevents.Storage. @@ -50,7 +51,7 @@ func NewManifest( if err != nil { return nil, err } - unstructuredRaw, err := unstructured.NewGeneric(fsRaw, recognizer, pathExcluder, serializer.NewFrameReaderFactory()) + unstructuredRaw, err := unstructured.NewGeneric(fsRaw, recognizer, pathExcluder, frame.DefaultFactory()) if err != nil { return nil, err } @@ -101,7 +102,7 @@ var _ Storage = &Generic{} // in sync, and sends high-level ObjectEvents upstream. // // This implementation does not support different VersionRefs, but always stays on -// the "zero value" "" branch. +// the "zero value" "" branch. 
TODO type Generic struct { unstructured.Storage // the filesystem events emitter @@ -109,7 +110,7 @@ type Generic struct { // channels inbound fileevents.FileEventStream - outbound event.ObjectEventStream + outbound chan watch.Event outboundMu *gosync.Mutex // goroutine @@ -123,6 +124,7 @@ func (s *Generic) FileEventsEmitter() fileevents.Emitter { return s.emitter } +/* func (s *Generic) WatchForObjectEvents(ctx context.Context, into event.ObjectEventStream) error { s.outboundMu.Lock() defer s.outboundMu.Unlock() @@ -149,7 +151,7 @@ func (s *Generic) WatchForObjectEvents(ctx context.Context, into event.ObjectEve } } return nil // all ok -} +}*/ // Sync extends the underlying unstructured.Storage.Sync(), but optionally also // sends special "SYNC" and "ERROR" events to the returned "successful" and "duplicates" @@ -170,6 +172,7 @@ func (s *Generic) Sync(ctx context.Context) (successful, duplicates core.Unversi }) _ = duplicates.ForEach(func(id core.UnversionedObjectID) error { // Send an error upstream for the duplicate + // TODO: Struct error s.sendError(id, fmt.Errorf("%w: %s", unstructured.ErrTrackingDuplicate, id)) return nil }) diff --git a/pkg/storage/filesystem/unstructured/filefinder_mapped.go b/pkg/storage/filesystem/unstructured/filefinder_mapped.go index b843795d..4f900932 100644 --- a/pkg/storage/filesystem/unstructured/filefinder_mapped.go +++ b/pkg/storage/filesystem/unstructured/filefinder_mapped.go @@ -70,7 +70,9 @@ func (f *GenericFileFinder) ContentTyper() filesystem.ContentTyper { } func (f *GenericFileFinder) versionedIndex(ctx context.Context) (btree.Index, error) { - i, ok := f.index.VersionedTree(core.GetVersionRef(ctx).Branch()) + ref := f.Filesystem().RefResolver().GetRef(ctx) + + i, ok := f.index.VersionedTree() if ok { return i, nil } diff --git a/pkg/storage/filesystem/unstructured/interfaces.go b/pkg/storage/filesystem/unstructured/interfaces.go index 16e6680e..4943b609 100644 --- a/pkg/storage/filesystem/unstructured/interfaces.go +++ 
b/pkg/storage/filesystem/unstructured/interfaces.go @@ -4,6 +4,7 @@ import ( "context" "github.com/weaveworks/libgitops/pkg/frame" + "github.com/weaveworks/libgitops/pkg/storage/commit" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) @@ -84,11 +85,11 @@ type FileFinder interface { // on the existing versionref "base". head must be non-nil, but base can be nil, if it is // desired that "head" has no parent, and hence, is blank. An error is returned if head is // nil, or base does not exist. - RegisterVersionRef(head, base core.VersionRef) error + RegisterVersionRef(head commit.Ref, base commit.Hash) error // HasVersionRef returns true if the given head version ref has been registered. - HasVersionRef(head core.VersionRef) bool + HasVersionRef(head commit.Ref) bool // DeleteVersionRef deletes the given head version ref. - DeleteVersionRef(head core.VersionRef) + DeleteVersionRef(head commit.Ref) } // ChecksumPath is a tuple of a given Checksum and relative file Path, diff --git a/pkg/storage/filesystem/unstructured/storage.go b/pkg/storage/filesystem/unstructured/storage.go index 730eea83..ac2bdf3b 100644 --- a/pkg/storage/filesystem/unstructured/storage.go +++ b/pkg/storage/filesystem/unstructured/storage.go @@ -8,6 +8,7 @@ import ( "github.com/sirupsen/logrus" "github.com/weaveworks/libgitops/pkg/content" "github.com/weaveworks/libgitops/pkg/frame" + "github.com/weaveworks/libgitops/pkg/storage/commit" "github.com/weaveworks/libgitops/pkg/storage/core" "github.com/weaveworks/libgitops/pkg/storage/filesystem" ) @@ -66,8 +67,9 @@ func (s *Generic) Sync(ctx context.Context) (successful, duplicates core.Unversi successful = core.NewUnversionedObjectIDSet() duplicates = core.NewUnversionedObjectIDSet() - ref := core.GetVersionRef(ctx) - if !fileFinder.HasVersionRef(ref) { + // If the context carries a + ref, ok := commit.GetRef(ctx) + if ok && !fileFinder.HasVersionRef(ref) { if err = 
fileFinder.RegisterVersionRef(ref, nil); err != nil { return } @@ -154,7 +156,7 @@ func RecognizeIDsInFile( contentTyper := fileFinder.ContentTyper() // Get the current checksum of the file - currentChecksum, err := fs.Checksum(ctx, filePath) + currentChecksum, err := fs.WithContext(ctx).Checksum(filePath) if err != nil { return nil, nil, false, fmt.Errorf("Could not get checksum for file %q: %v", filePath, err) } @@ -175,7 +177,7 @@ func RecognizeIDsInFile( // If the file is not known to the FileFinder yet, or if the checksum // was empty, read the file, and recognize it. - fileContent, err := fs.ReadFile(ctx, filePath) + fileContent, err := fs.WithContext(ctx).ReadFile(filePath) if err != nil { return nil, nil, false, fmt.Errorf("Could not read file %q: %v", filePath, err) } @@ -185,6 +187,7 @@ func RecognizeIDsInFile( return nil, nil, false, fmt.Errorf("Could not get content type for file %q: %v", filePath, err) } // Create a new FrameReader for the given ContentType and ReadCloser + // TODO: Use a recognizing frame.Reader here fr := framingFactory.NewReader(ct, content.FromBytes(fileContent)) // Recognize all IDs in the file versionedIDs, err := recognizer.RecognizeObjectIDs(filePath, fr) diff --git a/pkg/storage/interfaces.go b/pkg/storage/interfaces.go index 9c026d9d..fe392646 100644 --- a/pkg/storage/interfaces.go +++ b/pkg/storage/interfaces.go @@ -5,6 +5,7 @@ import ( "errors" "github.com/weaveworks/libgitops/pkg/content" + "github.com/weaveworks/libgitops/pkg/storage/commit" "github.com/weaveworks/libgitops/pkg/storage/core" "k8s.io/apimachinery/pkg/util/sets" ) @@ -34,11 +35,13 @@ type Storage interface { // StorageCommon is an interface that contains the resources both needed // by Reader and Writer. type StorageCommon interface { - VersionRefResolver() core.VersionRefResolver + // RefResolver is able to resolve version references to immutable + // commit hashes. 
+ RefResolver() commit.RefResolver // Namespacer gives access to the namespacer that is used Namespacer() Namespacer // Exists checks if the resource indicated by the ID exists. - Exists(ctx context.Context, id core.UnversionedObjectID) bool + Exists(ctx context.Context, id core.UnversionedObjectID) (bool, error) } // Namespacer is an interface that lets the caller know if a GroupKind is namespaced From 70deb2a8a1f3fa659421e9cc95d33a7208f880c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lucas=20K=C3=A4ldstr=C3=B6m?= Date: Mon, 9 Aug 2021 13:41:24 +0300 Subject: [PATCH 149/149] WIP just for demo --- go.mod | 1 + go.sum | 303 +++++++++++++++++- .../client/transactional/interfaces.go | 4 +- pkg/storage/client/transactional/test_test.go | 51 +++ 4 files changed, 348 insertions(+), 11 deletions(-) create mode 100644 pkg/storage/client/transactional/test_test.go diff --git a/go.mod b/go.mod index a8a35755..dbc33a44 100644 --- a/go.mod +++ b/go.mod @@ -35,6 +35,7 @@ require ( k8s.io/apimachinery v0.21.3 k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 + sigs.k8s.io/cluster-api v0.4.0 sigs.k8s.io/controller-runtime v0.9.5 sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738 sigs.k8s.io/yaml v1.2.0 diff --git a/go.sum b/go.sum index f44c0ba4..0bb87307 100644 --- a/go.sum +++ b/go.sum @@ -9,18 +9,33 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod 
h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod 
h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= @@ -32,6 +47,9 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= @@ -40,17 +58,20 @@ github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMo github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= 
github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= @@ -60,6 +81,7 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 
h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -69,6 +91,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= @@ -77,14 +101,20 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= github.com/chzyer/logex v1.1.10/go.mod 
h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coredns/caddy v1.1.0 h1:ezvsPrT/tA/7pYDBZxu0cT0VmWk75AfIaf6GSYCNMf0= +github.com/coredns/caddy v1.1.0/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= +github.com/coredns/corefile-migration v1.0.12 h1:TJGATo0YLQJVIKJZLajXE1IrhRFtYTR1cYsGIT1YNEk= +github.com/coredns/corefile-migration v1.0.12/go.mod h1:NJOI8ceUF/NTgEwtjD+TUq3/BnH/GF7WAM3RzCa3hBo= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -93,6 +123,7 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod 
h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -102,9 +133,15 @@ github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/drone/envsubst/v2 v2.0.0-20210615175204-7bf45dbf5372/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -116,15 +153,20 @@ github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.7.0/go.mod 
h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fluxcd/go-git-providers v0.2.0 h1:2dxT4r9UDjKwsNFmO9wcSR2FUqKyvsDwha5b/zvK1Ko= github.com/fluxcd/go-git-providers v0.2.0/go.mod h1:nRgNpHZmZhrsyNSma1JcAhjUG9xrqMGJcIUr9K7M7vk= github.com/fluxcd/pkg/ssh v0.2.0 h1:e9V+HReOL7czm7edVzYS1e+CnFKz1/kHiUNfLRpBdH8= @@ -132,12 +174,16 @@ github.com/fluxcd/pkg/ssh v0.2.0/go.mod h1:EpQC7Ztdlbi8S/dlYXqVDZtHtLpN3FNl3N6zW github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-git/gcfg v1.5.0 
h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= @@ -164,24 +210,61 @@ github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc= github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM= github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference 
v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/spec v0.20.3 h1:uH9RQ6vdyPSs2pSy9fL8QPspDF2AMIMPtmK5coSSjtQ= github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod 
h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gobuffalo/flect v0.2.3 h1:f/ZukRnSNA/DUpSNDadko7Qc0PhGvsew35p/2tu+CRY= +github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -191,19 +274,24 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er 
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.4/go.mod 
h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -213,8 +301,10 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= @@ -223,25 +313,40 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= +github.com/google/go-github/v33 v33.0.0/go.mod h1:GMdDnVZY/2TsWgp/lkYnpSAh6TrzhANBBwm6k6TTEXg= github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof 
v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= @@ -256,6 +361,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= 
+github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= @@ -292,6 +398,7 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -339,25 +446,35 @@ github.com/labstack/echo v3.3.10+incompatible h1:pGRcYk231ExFAyoAjAfD85kQzRJCRI8 github.com/labstack/echo v3.3.10+incompatible/go.mod h1:0INS7j/VjnFxD4E2wkz67b8cVwCLbBmJyDaka6Cmk1s= github.com/labstack/gommon v0.3.0 h1:JEeO0bvc78PKdyHxloTKiF8BD5iGrH8T6MSeGvSgob0= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod 
h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12/go.mod 
h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -367,11 +484,13 @@ github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrk github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -393,21 +512,28 @@ github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega 
v1.14.0 h1:ep6kpPVwmr/nTbklSx2nrLNSIO62DoYAhnPNIMhK8gI= github.com/onsi/gomega v1.14.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -448,9 +574,11 @@ github.com/rjeczalik/notify v0.9.2/go.mod h1:aErll2f0sUX9PXZnVNyeiObbmTlk5jnMoCa github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= 
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= @@ -470,10 +598,13 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -481,6 +612,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.0/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= 
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -494,6 +626,7 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -502,6 +635,7 @@ github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6Kllzaw github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/xanzy/go-gitlab v0.43.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= @@ -509,16 +643,29 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod 
h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= 
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/otel v1.0.0-RC2 h1:SHhxSjB+omnGZPgGlKe+QMp3MyazcOHdQ8qwo89oKbg= go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM= go.opentelemetry.io/otel/exporters/jaeger v1.0.0-RC2 h1:RF0nWsIDpDBe+s06lkLxUw9CWQUAhO6hBSxxB7dz45s= @@ -547,15 +694,18 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/ go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -585,8 +735,10 @@ golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -595,12 +747,16 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449 h1:xUIPaMhvROX9dhPvRCenIJtU78+lbEenGbgqB5hfHCQ= golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -609,6 +765,7 @@ golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -616,6 +773,7 @@ golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -626,14 +784,24 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/oauth2 v0.0.0-20180227000427-d7d64896b5ff/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -642,16 +810,27 @@ golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAG golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1 h1:x622Z2o4hgCr/4CiKWc51jHVKaWdtVpBNmEI8wI9Qns= +golang.org/x/oauth2 v0.0.0-20210615190721-d04028783cf1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180224232135-f6cff0780e54/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -664,6 +843,7 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -692,21 +872,36 @@ golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -728,11 +923,13 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -743,6 +940,7 @@ golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools 
v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -765,13 +963,29 @@ golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools 
v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.0 h1:po9/4sTYwZU9lPhi1tOrb4hCv3qrhiQ77LZfGa2OjwY= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2 h1:kRBLX7v7Af8W7Gdbbc908OJcdgtK8bOz9Uaj8/F1ACA= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -788,7 +1002,19 @@ google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsb google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api 
v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= google.golang.org/appengine v1.0.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -796,6 +1022,7 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= 
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= @@ -815,12 +1042,33 @@ google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a h1:pOwg4OoaRYScjmR4LlLgdtnyoHYTSAVhhqe5uPdpII8= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -830,9 +1078,19 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -862,6 +1120,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod 
h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -875,6 +1134,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -891,40 +1151,65 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.21.2/go.mod h1:Lv6UGJZ1rlMI1qusN8ruAp9PUBFyBwpEHAdG24vIsiU= k8s.io/api v0.21.3 h1:cblWILbLO8ar+Fj6xdDGr603HRsf8Wu9E9rngJeprZQ= k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= +k8s.io/apiextensions-apiserver 
v0.21.2/go.mod h1:+Axoz5/l3AYpGLlhJDfcVQzCerVYq3K3CvDMvw6X1RA= k8s.io/apiextensions-apiserver v0.21.3 h1:+B6biyUWpqt41kz5x6peIsljlsuwvNAp/oFax/j2/aY= k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= +k8s.io/apimachinery v0.21.2/go.mod h1:CdTY8fU/BlvAbJ2z/8kBwimGki5Zp8/fbVuLY8gJumM= k8s.io/apimachinery v0.21.3 h1:3Ju4nvjCngxxMYby0BimUk+pQHPOQp3eCGChk5kfVII= k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= +k8s.io/apiserver v0.21.2/go.mod h1:lN4yBoGyiNT7SC1dmNk0ue6a5Wi6O3SWOIw91TsucQw= +k8s.io/apiserver v0.21.3 h1:QxAgE1ZPQG5cPlHScHTnLxP9H/kU3zjH1Vnd8G+n5OI= k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= +k8s.io/cli-runtime v0.21.2/go.mod h1:8u/jFcM0QpoI28f6sfrAAIslLCXUYKD5SsPPMWiHYrI= +k8s.io/client-go v0.21.2/go.mod h1:HdJ9iknWpbl3vMGtib6T2PyI/VYxiZfq936WNVHBRrA= k8s.io/client-go v0.21.3 h1:J9nxZTOmvkInRDCzcSNQmPJbDYN/PjlxXT9Mos3HcLg= k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= +k8s.io/cluster-bootstrap v0.21.2 h1:GXvCxl619A0edhAprX8U5gUZ5lQCUf7xhDa7SkXnlx0= +k8s.io/cluster-bootstrap v0.21.2/go.mod h1:OEm/gajtWz/ohbS4NGxkyTp/6f1fW3TBThgCQ1ljhHo= +k8s.io/code-generator v0.21.2/go.mod h1:8mXJDCB7HcRo1xiEQstcguZkbxZaqeUOrO9SsicWs3U= k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= +k8s.io/component-base v0.21.2/go.mod h1:9lvmIThzdlrJj5Hp8Z/TOgIkdfsNARQ1pT+3PByuiuc= k8s.io/component-base v0.21.3 h1:4WuuXY3Npa+iFfi2aDRiOz+anhNvRfye0859ZgfC5Og= k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= +k8s.io/component-helpers v0.21.2/go.mod h1:DbyFt/A0p6Cv+R5+QOGSJ5f5t4xDfI8Yb89a57DgJlQ= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod 
h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= +k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM= +k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d h1:lUK8GPtuJy8ClWZhuvKoaLdKGPLq9H1PxWp7VPBZBkU= k8s.io/kube-openapi v0.0.0-20210527164424-3c818078ee3d/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kubectl v0.21.2/go.mod h1:PgeUclpG8VVmmQIl8zpLar3IQEpFc9mrmvlwY3CK1xo= +k8s.io/metrics v0.21.2/go.mod h1:wzlOINZMCtWq8dR9gHlyaOemmYlOpAoldEIXE82gAhI= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210527160623-6fdb442a123b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471 h1:DnzUXII7sVg1FJ/4JX6YDRJfLNAC7idRatPwe07suiI= k8s.io/utils v0.0.0-20210722164352-7f3ee0f31471/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.19/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/cluster-api v0.4.0 h1:y9MxtU1uW9r9JtDyOQ/9BRXZEau2PGl2yOIozaxXO0E= +sigs.k8s.io/cluster-api v0.4.0/go.mod h1:9ALETQ/6KGZ/kYiqvQGfjOx0CfVGE39d4VP3UrS5B24= +sigs.k8s.io/controller-runtime v0.9.1/go.mod 
h1:cTqsgnwSOsYS03XwySYZj8k6vf0+eC4FJRcCgQ9elb4= sigs.k8s.io/controller-runtime v0.9.5 h1:WThcFE6cqctTn2jCZprLICO6BaKZfhsT37uAapTNfxc= sigs.k8s.io/controller-runtime v0.9.5/go.mod h1:q6PpkM5vqQubEKUKOM6qr06oXGzOBcCby1DA9FbyZeA= +sigs.k8s.io/kustomize/api v0.8.8/go.mod h1:He1zoK0nk43Pc6NlV085xDXDXTNprtcyKZVm3swsdNY= +sigs.k8s.io/kustomize/cmd/config v0.9.10/go.mod h1:Mrby0WnRH7hA6OwOYnYpfpiY0WJIMgYrEDfwOeFdMK0= +sigs.k8s.io/kustomize/kustomize/v4 v4.1.2/go.mod h1:PxBvo4WGYlCLeRPL+ziT64wBXqbgfcalOS/SXa/tcyo= +sigs.k8s.io/kustomize/kyaml v0.10.17/go.mod h1:mlQFagmkm1P+W4lZJbJ/yaxMd8PqMRSC4cPcfUVt5Hg= sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738 h1:Nkg3viu9IE/TSzvYt4GGy5FkhdPk3bptXuxW5TnU9uo= sigs.k8s.io/kustomize/kyaml v0.11.1-0.20210721155208-d6ce84604738/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/pkg/storage/client/transactional/interfaces.go b/pkg/storage/client/transactional/interfaces.go index d0f979c6..0346352b 100644 --- a/pkg/storage/client/transactional/interfaces.go +++ b/pkg/storage/client/transactional/interfaces.go @@ -72,7 +72,7 @@ type TransactionManager interface { MergeBranches(ctx context.Context, base, head core.VersionRef, commit Commit) error }*/ -type CustomTxFunc func(ctx context.Context) error +type CustomTxFunc func(ctx context.Context, writer client.Client) error type Tx interface { Commit(req commit.Request) error @@ -81,7 +81,7 @@ type Tx interface { Client() client.Client // TODO: Rename to Do/Run/Execute - Custom(CustomTxFunc) Tx + 
Run(CustomTxFunc) Tx Get(key core.ObjectKey, obj client.Object) Tx List(list client.ObjectList, opts ...client.ListOption) Tx diff --git a/pkg/storage/client/transactional/test_test.go b/pkg/storage/client/transactional/test_test.go new file mode 100644 index 00000000..7c0c85e5 --- /dev/null +++ b/pkg/storage/client/transactional/test_test.go @@ -0,0 +1,51 @@ +package transactional_test + +import ( + "context" + "strings" + "testing" + + "github.com/weaveworks/libgitops/pkg/storage/client" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional" + "github.com/weaveworks/libgitops/pkg/storage/client/transactional/distributed/git/github" + "github.com/weaveworks/libgitops/pkg/storage/commit" + "sigs.k8s.io/cluster-api/api/v1alpha3" +) + +func TestFoo(t *testing.T) { + gitClient, _ := transactional.NewGeneric(nil, nil) + ctx := context.Background() + + var machineList v1alpha3.MachineList + _ = gitClient. + AtRef(commit.AtBranch("main")). // Start tx from main branch + Transaction(ctx, "foo-update-"). // Autogenerated suffix + List(&machineList). // Load all Machine object into machineList + Run(func(ctx context.Context, txClient client.Client) error { + for i := range machineList.Items { + machine := &machineList.Items[i] + // Skip all machines whose names don't start with foo + if !strings.HasPrefix(machine.Name, "foo") { + continue + } + machine.ClusterName = "weave-gitops-cluster" + // Update the Machine object in Git + if err := txClient.Update(ctx, machine); err != nil { + return err + } + } + return nil + }). // Do a commit, and a PR using go-git-providers, too. + Commit(github.PullRequest{ + Request: commit.GenericRequest{ + Name: "Lucas Käldström", + Email: "lucas@weave.works", + Title: "Update CAPI machines", + Description: "Machines with prefix foo are now in the Weave cluster", + }, + TargetBranch: "main", + Labels: []string{"kind/automatic"}, + Assignees: []string{"luxas"}, + Milestone: "v1.0.1", + }) +}