From c078768fcb0a09876e15e8d29c8941526fa46d7e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Wed, 6 Jul 2022 18:54:08 +0200 Subject: [PATCH 01/90] containerd/pull: Use authorization MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/image_pull.go | 3 +++ daemon/containerd/resolver.go | 25 +++++++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 daemon/containerd/resolver.go diff --git a/daemon/containerd/image_pull.go b/daemon/containerd/image_pull.go index 2cfaa6bc4a31b..4a2b86ca8f40e 100644 --- a/daemon/containerd/image_pull.go +++ b/daemon/containerd/image_pull.go @@ -41,6 +41,9 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, } } + resolver := newResolverFromAuthConfig(authConfig) + opts = append(opts, containerd.WithResolver(resolver)) + _, err = i.client.Pull(ctx, ref.String(), opts...) return err } diff --git a/daemon/containerd/resolver.go b/daemon/containerd/resolver.go new file mode 100644 index 0000000000000..b172d6c661553 --- /dev/null +++ b/daemon/containerd/resolver.go @@ -0,0 +1,25 @@ +package containerd + +import ( + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + registrytypes "github.com/docker/docker/api/types/registry" +) + +func newResolverFromAuthConfig(authConfig *registrytypes.AuthConfig) remotes.Resolver { + opts := []docker.RegistryOpt{} + if authConfig != nil { + authorizer := docker.NewDockerAuthorizer(docker.WithAuthCreds(func(_ string) (string, string, error) { + if authConfig.IdentityToken != "" { + return "", authConfig.IdentityToken, nil + } + return authConfig.Username, authConfig.Password, nil + })) + + opts = append(opts, docker.WithAuthorizer(authorizer)) + } + + return docker.NewResolver(docker.ResolverOptions{ + Hosts: docker.ConfigureDefaultRegistries(opts...), + }) +} From 
44891e86a552765be8d1b624b6f2281ae0ed79f8 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Wed, 6 Jul 2022 14:24:38 +0200 Subject: [PATCH 02/90] add support for image inspect requires passing context around Signed-off-by: Nicolas De Loof --- api/server/router/container/backend.go | 8 +- .../router/container/container_routes.go | 8 +- api/server/router/image/backend.go | 4 +- api/server/router/image/image_routes.go | 4 +- api/server/router/swarm/backend.go | 2 +- api/server/router/swarm/cluster_routes.go | 2 +- builder/builder.go | 6 +- builder/dockerfile/builder.go | 18 +- builder/dockerfile/containerbackend.go | 6 +- builder/dockerfile/dispatchers.go | 79 ++++--- builder/dockerfile/dispatchers_test.go | 66 +++--- builder/dockerfile/evaluator.go | 37 +-- builder/dockerfile/evaluator_test.go | 3 +- builder/dockerfile/imageprobe.go | 6 +- builder/dockerfile/internals.go | 17 +- builder/dockerfile/internals_linux.go | 3 +- builder/dockerfile/internals_linux_test.go | 5 +- builder/dockerfile/internals_windows.go | 13 +- builder/dockerfile/mockbackend_test.go | 6 +- cmd/dockerd/daemon.go | 6 +- daemon/cluster/executor/backend.go | 8 +- daemon/cluster/executor/container/adapter.go | 6 +- daemon/cluster/swarm.go | 8 +- daemon/commit.go | 5 +- daemon/containerd/cache.go | 4 +- daemon/containerd/image.go | 217 +++++++++++++++++- daemon/containerd/image_import.go | 3 +- daemon/containerd/service.go | 2 +- daemon/create.go | 23 +- daemon/daemon.go | 14 +- daemon/disk_usage.go | 2 +- daemon/image_service.go | 6 +- daemon/images/cache.go | 6 +- daemon/images/image.go | 2 +- daemon/images/image_builder.go | 4 +- daemon/images/image_delete.go | 2 +- daemon/images/image_events.go | 2 +- daemon/images/image_history.go | 4 +- daemon/images/image_import.go | 5 +- daemon/images/image_list.go | 4 +- daemon/images/image_pull.go | 2 +- daemon/images/image_tag.go | 2 +- daemon/list.go | 33 +-- daemon/list_test.go | 11 +- daemon/monitor.go | 2 +- daemon/oci_linux.go | 2 +- 
daemon/oci_linux_test.go | 17 +- daemon/oci_windows.go | 5 +- daemon/restart.go | 2 +- daemon/start.go | 18 +- 50 files changed, 476 insertions(+), 244 deletions(-) diff --git a/api/server/router/container/backend.go b/api/server/router/container/backend.go index 4db989a10201b..f0624eef836b3 100644 --- a/api/server/router/container/backend.go +++ b/api/server/router/container/backend.go @@ -32,14 +32,14 @@ type copyBackend interface { // stateBackend includes functions to implement to provide container state lifecycle functionality. type stateBackend interface { - ContainerCreate(config types.ContainerCreateConfig) (container.CreateResponse, error) + ContainerCreate(ctx context.Context, config types.ContainerCreateConfig) (container.CreateResponse, error) ContainerKill(name string, signal string) error ContainerPause(name string) error ContainerRename(oldName, newName string) error ContainerResize(name string, height, width int) error ContainerRestart(ctx context.Context, name string, options container.StopOptions) error ContainerRm(name string, config *types.ContainerRmConfig) error - ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStart(ctx context.Context, name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error ContainerStop(ctx context.Context, name string, options container.StopOptions) error ContainerUnpause(name string) error ContainerUpdate(name string, hostConfig *container.HostConfig) (container.ContainerUpdateOKBody, error) @@ -54,7 +54,7 @@ type monitorBackend interface { ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) - Containers(config *types.ContainerListOptions) ([]*types.Container, error) + Containers(ctx context.Context, config *types.ContainerListOptions) ([]*types.Container, error) } // attachBackend includes 
function to implement to provide container attaching functionality. @@ -68,7 +68,7 @@ type systemBackend interface { } type commitBackend interface { - CreateImageFromContainer(name string, config *backend.CreateImageConfig) (imageID string, err error) + CreateImageFromContainer(ctx context.Context, name string, config *backend.CreateImageConfig) (imageID string, err error) } // Backend is all the methods that need to be implemented to provide container specific functionality. diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go index 6670b9ec68137..5df051fc95f4a 100644 --- a/api/server/router/container/container_routes.go +++ b/api/server/router/container/container_routes.go @@ -58,7 +58,7 @@ func (s *containerRouter) postCommit(ctx context.Context, w http.ResponseWriter, Changes: r.Form["changes"], } - imgID, err := s.backend.CreateImageFromContainer(r.Form.Get("container"), commitCfg) + imgID, err := s.backend.CreateImageFromContainer(ctx, r.Form.Get("container"), commitCfg) if err != nil { return err } @@ -91,7 +91,7 @@ func (s *containerRouter) getContainersJSON(ctx context.Context, w http.Response config.Limit = limit } - containers, err := s.backend.Containers(config) + containers, err := s.backend.Containers(ctx, config) if err != nil { return err } @@ -214,7 +214,7 @@ func (s *containerRouter) postContainersStart(ctx context.Context, w http.Respon checkpoint := r.Form.Get("checkpoint") checkpointDir := r.Form.Get("checkpoint-dir") - if err := s.backend.ContainerStart(vars["name"], hostConfig, checkpoint, checkpointDir); err != nil { + if err := s.backend.ContainerStart(ctx, vars["name"], hostConfig, checkpoint, checkpointDir); err != nil { return err } @@ -578,7 +578,7 @@ func (s *containerRouter) postContainersCreate(ctx context.Context, w http.Respo hostConfig.PidsLimit = nil } - ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ + ccr, err := s.backend.ContainerCreate(ctx, 
types.ContainerCreateConfig{ Name: name, Config: config, HostConfig: hostConfig, diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index ebddff686ec3f..8921e114b3e41 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -24,14 +24,14 @@ type imageBackend interface { ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) ImageHistory(imageName string) ([]*image.HistoryResponseItem, error) Images(ctx context.Context, opts types.ImageListOptions) ([]*types.ImageSummary, error) - GetImage(refOrID string, platform *specs.Platform) (retImg *dockerimage.Image, retErr error) + GetImage(ctx context.Context, refOrID string, platform *specs.Platform) (*dockerimage.Image, error) TagImage(imageName, repository, tag string) (string, error) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) } type importExportBackend interface { LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error - ImportImage(src string, repository string, platform *specs.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error + ImportImage(ctx context.Context, src string, repository string, platform *specs.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error ExportImage(names []string, outStream io.Writer) error } diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index fc397d4458925..3e06be9ce358c 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -68,7 +68,7 @@ func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWrite progressErr = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output) } else { // import src := r.Form.Get("fromSrc") - progressErr = s.backend.ImportImage(src, repo, 
platform, tag, message, r.Body, output, r.Form["changes"]) + progressErr = s.backend.ImportImage(ctx, src, repo, platform, tag, message, r.Body, output, r.Form["changes"]) } if progressErr != nil { if !output.Flushed() { @@ -193,7 +193,7 @@ func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r } func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - image, err := s.backend.GetImage(vars["name"], nil) + image, err := s.backend.GetImage(ctx, vars["name"], nil) if err != nil { return err } diff --git a/api/server/router/swarm/backend.go b/api/server/router/swarm/backend.go index d0c7e60fb3270..fa63e363bdb14 100644 --- a/api/server/router/swarm/backend.go +++ b/api/server/router/swarm/backend.go @@ -12,7 +12,7 @@ import ( type Backend interface { Init(req types.InitRequest) (string, error) Join(req types.JoinRequest) error - Leave(force bool) error + Leave(ctx context.Context, force bool) error Inspect() (types.Swarm, error) Update(uint64, types.Spec, types.UpdateFlags) error GetUnlockKey() (string, error) diff --git a/api/server/router/swarm/cluster_routes.go b/api/server/router/swarm/cluster_routes.go index 293e4f1421218..8a77e300d6dda 100644 --- a/api/server/router/swarm/cluster_routes.go +++ b/api/server/router/swarm/cluster_routes.go @@ -56,7 +56,7 @@ func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, } force := httputils.BoolValue(r, "force") - return sr.backend.Leave(force) + return sr.backend.Leave(ctx, force) } func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { diff --git a/builder/builder.go b/builder/builder.go index f01563812f937..2fc371da90246 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -61,13 +61,13 @@ type ExecBackend interface { // ContainerAttachRaw attaches to container. 
ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool, attached chan struct{}) error // ContainerCreateIgnoreImagesArgsEscaped creates a new Docker container and returns potential warnings - ContainerCreateIgnoreImagesArgsEscaped(config types.ContainerCreateConfig) (container.CreateResponse, error) + ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context, config types.ContainerCreateConfig) (container.CreateResponse, error) // ContainerRm removes a container specified by `id`. ContainerRm(name string, config *types.ContainerRmConfig) error // ContainerKill stops the container execution abruptly. ContainerKill(containerID string, sig string) error // ContainerStart starts a new container - ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + ContainerStart(ctx context.Context, containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error // ContainerWait stops processing until the given container is stopped. ContainerWait(ctx context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) } @@ -81,7 +81,7 @@ type Result struct { // ImageCacheBuilder represents a generator for stateful image cache. type ImageCacheBuilder interface { // MakeImageCache creates a stateful image cache. - MakeImageCache(cacheFrom []string) ImageCache + MakeImageCache(ctx context.Context, cacheFrom []string) ImageCache } // ImageCache abstracts an image cache. 
diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go index 820c2d102e7b6..5bc30d18af1ec 100644 --- a/builder/dockerfile/builder.go +++ b/builder/dockerfile/builder.go @@ -95,7 +95,7 @@ func (bm *BuildManager) Build(ctx context.Context, config backend.BuildConfig) ( if err != nil { return nil, err } - return b.build(source, dockerfile) + return b.build(ctx, source, dockerfile) } // builderOptions are the dependencies required by the builder @@ -147,7 +147,7 @@ func newBuilder(clientCtx context.Context, options builderOptions) (*Builder, er idMapping: options.IDMapping, imageSources: newImageSources(clientCtx, options), pathCache: options.PathCache, - imageProber: newImageProber(options.Backend, config.CacheFrom, config.NoCache), + imageProber: newImageProber(clientCtx, options.Backend, config.CacheFrom, config.NoCache), containerManager: newContainerManager(options.Backend), } @@ -181,7 +181,7 @@ func buildLabelOptions(labels map[string]string, stages []instructions.Stage) { // Build runs the Dockerfile builder by parsing the Dockerfile and executing // the instructions from the file. 
-func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*builder.Result, error) { +func (b *Builder) build(ctx context.Context, source builder.Source, dockerfile *parser.Result) (*builder.Result, error) { defer b.imageSources.Unmount() stages, metaArgs, err := instructions.Parse(dockerfile.AST) @@ -205,7 +205,7 @@ func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*buil buildLabelOptions(b.options.Labels, stages) dockerfile.PrintWarnings(b.Stderr) - dispatchState, err := b.dispatchDockerfileWithCancellation(stages, metaArgs, dockerfile.EscapeToken, source) + dispatchState, err := b.dispatchDockerfileWithCancellation(ctx, stages, metaArgs, dockerfile.EscapeToken, source) if err != nil { return nil, err } @@ -244,7 +244,7 @@ func printCommand(out io.Writer, currentCommandIndex int, totalCommands int, cmd return currentCommandIndex + 1 } -func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions.Stage, metaArgs []instructions.ArgCommand, escapeToken rune, source builder.Source) (*dispatchState, error) { +func (b *Builder) dispatchDockerfileWithCancellation(ctx context.Context, parseResult []instructions.Stage, metaArgs []instructions.ArgCommand, escapeToken rune, source builder.Source) (*dispatchState, error) { dispatchRequest := dispatchRequest{} buildArgs := NewBuildArgs(b.options.BuildArgs) totalCommands := len(metaArgs) + len(parseResult) @@ -272,7 +272,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions. 
dispatchRequest = newDispatchRequest(b, escapeToken, source, buildArgs, stagesResults) currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, stage.SourceCode) - if err := initializeStage(dispatchRequest, &stage); err != nil { + if err := initializeStage(ctx, dispatchRequest, &stage); err != nil { return nil, err } dispatchRequest.state.updateRunConfig() @@ -290,7 +290,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions. currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, cmd) - if err := dispatch(dispatchRequest, cmd); err != nil { + if err := dispatch(ctx, dispatchRequest, cmd); err != nil { return nil, err } dispatchRequest.state.updateRunConfig() @@ -318,7 +318,7 @@ func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions. // coming from the query parameter of the same name. // // TODO: Remove? -func BuildFromConfig(config *container.Config, changes []string, os string) (*container.Config, error) { +func BuildFromConfig(ctx context.Context, config *container.Config, changes []string, os string) (*container.Config, error) { if len(changes) == 0 { return config, nil } @@ -361,7 +361,7 @@ func BuildFromConfig(config *container.Config, changes []string, os string) (*co dispatchRequest.state.imageID = config.Image dispatchRequest.state.operatingSystem = os for _, cmd := range commands { - err := dispatch(dispatchRequest, cmd) + err := dispatch(ctx, dispatchRequest, cmd) if err != nil { return nil, errdefs.InvalidParameter(err) } diff --git a/builder/dockerfile/containerbackend.go b/builder/dockerfile/containerbackend.go index 99a6b14f6d844..ebb261a1c460e 100644 --- a/builder/dockerfile/containerbackend.go +++ b/builder/dockerfile/containerbackend.go @@ -28,8 +28,8 @@ func newContainerManager(docker builder.ExecBackend) *containerManager { } // Create a container -func (c *containerManager) Create(runConfig *container.Config, hostConfig 
*container.HostConfig) (container.CreateResponse, error) { - container, err := c.backend.ContainerCreateIgnoreImagesArgsEscaped(types.ContainerCreateConfig{ +func (c *containerManager) Create(ctx context.Context, runConfig *container.Config, hostConfig *container.HostConfig) (container.CreateResponse, error) { + container, err := c.backend.ContainerCreateIgnoreImagesArgsEscaped(ctx, types.ContainerCreateConfig{ Config: runConfig, HostConfig: hostConfig, }) @@ -69,7 +69,7 @@ func (c *containerManager) Run(ctx context.Context, cID string, stdout, stderr i } }() - if err := c.backend.ContainerStart(cID, nil, "", ""); err != nil { + if err := c.backend.ContainerStart(ctx, cID, nil, "", ""); err != nil { close(finished) logCancellationError(cancelErrCh, "error from ContainerStart: "+err.Error()) return err diff --git a/builder/dockerfile/dispatchers.go b/builder/dockerfile/dispatchers.go index d946db6c2bcfe..78ae91683ffba 100644 --- a/builder/dockerfile/dispatchers.go +++ b/builder/dockerfile/dispatchers.go @@ -9,6 +9,7 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( "bytes" + "context" "fmt" "runtime" "sort" @@ -35,7 +36,7 @@ import ( // // Sets the environment variable foo to bar, also makes interpolation // in the dockerfile available from the next statement on via ${foo}. -func dispatchEnv(d dispatchRequest, c *instructions.EnvCommand) error { +func dispatchEnv(ctx context.Context, d dispatchRequest, c *instructions.EnvCommand) error { runConfig := d.state.runConfig commitMessage := bytes.NewBufferString("ENV") for _, e := range c.Env { @@ -57,22 +58,22 @@ func dispatchEnv(d dispatchRequest, c *instructions.EnvCommand) error { runConfig.Env = append(runConfig.Env, newVar) } } - return d.builder.commit(d.state, commitMessage.String()) + return d.builder.commit(ctx, d.state, commitMessage.String()) } // MAINTAINER some text // // Sets the maintainer metadata. 
-func dispatchMaintainer(d dispatchRequest, c *instructions.MaintainerCommand) error { +func dispatchMaintainer(ctx context.Context, d dispatchRequest, c *instructions.MaintainerCommand) error { d.state.maintainer = c.Maintainer - return d.builder.commit(d.state, "MAINTAINER "+c.Maintainer) + return d.builder.commit(ctx, d.state, "MAINTAINER "+c.Maintainer) } // LABEL some json data describing the image // // Sets the Label variable foo to bar, -func dispatchLabel(d dispatchRequest, c *instructions.LabelCommand) error { +func dispatchLabel(ctx context.Context, d dispatchRequest, c *instructions.LabelCommand) error { if d.state.runConfig.Labels == nil { d.state.runConfig.Labels = make(map[string]string) } @@ -81,14 +82,14 @@ func dispatchLabel(d dispatchRequest, c *instructions.LabelCommand) error { d.state.runConfig.Labels[v.Key] = v.Value commitStr += " " + v.String() } - return d.builder.commit(d.state, commitStr) + return d.builder.commit(ctx, d.state, commitStr) } // ADD foo /path // // Add the file 'foo' to '/path'. Tarball and Remote URL (http, https) handling // exist here. If you do not wish to have this automatic handling, use COPY. -func dispatchAdd(d dispatchRequest, c *instructions.AddCommand) error { +func dispatchAdd(ctx context.Context, d dispatchRequest, c *instructions.AddCommand) error { if c.Chmod != "" { return errors.New("the --chmod option requires BuildKit. Refer to https://docs.docker.com/go/buildkit/ to learn how to build images with BuildKit enabled") } @@ -103,13 +104,13 @@ func dispatchAdd(d dispatchRequest, c *instructions.AddCommand) error { copyInstruction.chownStr = c.Chown copyInstruction.allowLocalDecompression = true - return d.builder.performCopy(d, copyInstruction) + return d.builder.performCopy(ctx, d, copyInstruction) } // COPY foo /path // // Same as 'ADD' but without the tar and remote url handling. 
-func dispatchCopy(d dispatchRequest, c *instructions.CopyCommand) error { +func dispatchCopy(ctx context.Context, d dispatchRequest, c *instructions.CopyCommand) error { if c.Chmod != "" { return errors.New("the --chmod option requires BuildKit. Refer to https://docs.docker.com/go/buildkit/ to learn how to build images with BuildKit enabled") } @@ -131,7 +132,7 @@ func dispatchCopy(d dispatchRequest, c *instructions.CopyCommand) error { if c.From != "" && copyInstruction.chownStr == "" { copyInstruction.preserveOwnership = true } - return d.builder.performCopy(d, copyInstruction) + return d.builder.performCopy(ctx, d, copyInstruction) } func (d *dispatchRequest) getImageMount(imageRefOrID string) (*imageMount, error) { @@ -153,7 +154,7 @@ func (d *dispatchRequest) getImageMount(imageRefOrID string) (*imageMount, error } // FROM [--platform=platform] imagename[:tag | @digest] [AS build-stage-name] -func initializeStage(d dispatchRequest, cmd *instructions.Stage) error { +func initializeStage(ctx context.Context, d dispatchRequest, cmd *instructions.Stage) error { d.builder.imageProber.Reset() var platform *specs.Platform @@ -181,12 +182,12 @@ func initializeStage(d dispatchRequest, cmd *instructions.Stage) error { if len(state.runConfig.OnBuild) > 0 { triggers := state.runConfig.OnBuild state.runConfig.OnBuild = nil - return dispatchTriggeredOnBuild(d, triggers) + return dispatchTriggeredOnBuild(ctx, d, triggers) } return nil } -func dispatchTriggeredOnBuild(d dispatchRequest, triggers []string) error { +func dispatchTriggeredOnBuild(ctx context.Context, d dispatchRequest, triggers []string) error { fmt.Fprintf(d.builder.Stdout, "# Executing %d build trigger", len(triggers)) if len(triggers) > 1 { fmt.Fprint(d.builder.Stdout, "s") @@ -209,7 +210,7 @@ func dispatchTriggeredOnBuild(d dispatchRequest, triggers []string) error { } return err } - err = dispatch(d, cmd) + err = dispatch(ctx, d, cmd) if err != nil { return err } @@ -277,15 +278,15 @@ func (d 
*dispatchRequest) getFromImage(shlex *shell.Lex, basename string, platfo return d.getImageOrStage(name, platform) } -func dispatchOnbuild(d dispatchRequest, c *instructions.OnbuildCommand) error { +func dispatchOnbuild(ctx context.Context, d dispatchRequest, c *instructions.OnbuildCommand) error { d.state.runConfig.OnBuild = append(d.state.runConfig.OnBuild, c.Expression) - return d.builder.commit(d.state, "ONBUILD "+c.Expression) + return d.builder.commit(ctx, d.state, "ONBUILD "+c.Expression) } // WORKDIR /tmp // // Set the working directory for future RUN/CMD/etc statements. -func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error { +func dispatchWorkdir(ctx context.Context, d dispatchRequest, c *instructions.WorkdirCommand) error { runConfig := d.state.runConfig var err error runConfig.WorkingDir, err = normalizeWorkdir(d.state.operatingSystem, runConfig.WorkingDir, c.Path) @@ -306,7 +307,7 @@ func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error { comment := "WORKDIR " + runConfig.WorkingDir runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, d.state.operatingSystem)) - containerID, err := d.builder.probeAndCreate(d.state, runConfigWithCommentCmd) + containerID, err := d.builder.probeAndCreate(ctx, d.state, runConfigWithCommentCmd) if err != nil || containerID == "" { return err } @@ -327,7 +328,7 @@ func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error { // RUN echo hi # sh -c echo hi (Linux and LCOW) // RUN echo hi # cmd /S /C echo hi (Windows) // RUN [ "echo", "hi" ] # echo hi -func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error { +func dispatchRun(ctx context.Context, d dispatchRequest, c *instructions.RunCommand) error { if !system.IsOSSupported(d.state.operatingSystem) { return system.ErrNotSupportedOperatingSystem } @@ -361,7 +362,7 @@ func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error { withEntrypointOverride(saveCmd, 
strslice.StrSlice{""}), withoutHealthcheck()) - cID, err := d.builder.create(runConfig) + cID, err := d.builder.create(ctx, runConfig) if err != nil { return err } @@ -421,7 +422,7 @@ func prependEnvOnCmd(buildArgs *BuildArgs, buildArgVars []string, cmd strslice.S // // Set the default command to run in the container (which may be empty). // Argument handling is the same as RUN. -func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error { +func dispatchCmd(ctx context.Context, d dispatchRequest, c *instructions.CmdCommand) error { runConfig := d.state.runConfig cmd, argsEscaped := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem, c.Name(), c.String()) @@ -437,7 +438,7 @@ func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error { runConfig.Cmd = cmd runConfig.ArgsEscaped = argsEscaped - if err := d.builder.commit(d.state, fmt.Sprintf("CMD %q", cmd)); err != nil { + if err := d.builder.commit(ctx, d.state, fmt.Sprintf("CMD %q", cmd)); err != nil { return err } if len(c.ShellDependantCmdLine.CmdLine) != 0 { @@ -451,7 +452,7 @@ func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error { // // Set the default healthcheck command to run in the container (which may be empty). // Argument handling is the same as RUN. 
-func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand) error { +func dispatchHealthcheck(ctx context.Context, d dispatchRequest, c *instructions.HealthCheckCommand) error { runConfig := d.state.runConfig if runConfig.Healthcheck != nil { oldCmd := runConfig.Healthcheck.Test @@ -460,7 +461,7 @@ func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand) } } runConfig.Healthcheck = c.Health - return d.builder.commit(d.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck)) + return d.builder.commit(ctx, d.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck)) } // ENTRYPOINT /usr/sbin/nginx @@ -470,7 +471,8 @@ func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand) // // Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint // is initialized at newBuilder time instead of through argument parsing. -func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) error { +// +func dispatchEntrypoint(ctx context.Context, d dispatchRequest, c *instructions.EntrypointCommand) error { runConfig := d.state.runConfig cmd, argsEscaped := resolveCmdLine(c.ShellDependantCmdLine, runConfig, d.state.operatingSystem, c.Name(), c.String()) @@ -492,14 +494,15 @@ func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) er runConfig.Cmd = nil } - return d.builder.commit(d.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint)) + return d.builder.commit(ctx, d.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint)) } // EXPOSE 6667/tcp 7000/tcp // // Expose ports for links and port mappings. This all ends up in // req.runConfig.ExposedPorts for runconfig. 
-func dispatchExpose(d dispatchRequest, c *instructions.ExposeCommand, envs []string) error { +// +func dispatchExpose(ctx context.Context, d dispatchRequest, c *instructions.ExposeCommand, envs []string) error { // custom multi word expansion // expose $FOO with FOO="80 443" is expanded as EXPOSE [80,443]. This is the only command supporting word to words expansion // so the word processing has been de-generalized @@ -525,22 +528,22 @@ func dispatchExpose(d dispatchRequest, c *instructions.ExposeCommand, envs []str d.state.runConfig.ExposedPorts[p] = struct{}{} } - return d.builder.commit(d.state, "EXPOSE "+strings.Join(c.Ports, " ")) + return d.builder.commit(ctx, d.state, "EXPOSE "+strings.Join(c.Ports, " ")) } // USER foo // // Set the user to 'foo' for future commands and when running the // ENTRYPOINT/CMD at container run time. -func dispatchUser(d dispatchRequest, c *instructions.UserCommand) error { +func dispatchUser(ctx context.Context, d dispatchRequest, c *instructions.UserCommand) error { d.state.runConfig.User = c.User - return d.builder.commit(d.state, fmt.Sprintf("USER %v", c.User)) + return d.builder.commit(ctx, d.state, fmt.Sprintf("USER %v", c.User)) } // VOLUME /foo // // Expose the volume /foo for use. Will also accept the JSON array form. -func dispatchVolume(d dispatchRequest, c *instructions.VolumeCommand) error { +func dispatchVolume(ctx context.Context, d dispatchRequest, c *instructions.VolumeCommand) error { if d.state.runConfig.Volumes == nil { d.state.runConfig.Volumes = map[string]struct{}{} } @@ -550,20 +553,20 @@ func dispatchVolume(d dispatchRequest, c *instructions.VolumeCommand) error { } d.state.runConfig.Volumes[v] = struct{}{} } - return d.builder.commit(d.state, fmt.Sprintf("VOLUME %v", c.Volumes)) + return d.builder.commit(ctx, d.state, fmt.Sprintf("VOLUME %v", c.Volumes)) } // STOPSIGNAL signal // // Set the signal that will be used to kill the container. 
-func dispatchStopSignal(d dispatchRequest, c *instructions.StopSignalCommand) error { +func dispatchStopSignal(ctx context.Context, d dispatchRequest, c *instructions.StopSignalCommand) error { _, err := signal.ParseSignal(c.Signal) if err != nil { return errdefs.InvalidParameter(err) } d.state.runConfig.StopSignal = c.Signal - return d.builder.commit(d.state, fmt.Sprintf("STOPSIGNAL %v", c.Signal)) + return d.builder.commit(ctx, d.state, fmt.Sprintf("STOPSIGNAL %v", c.Signal)) } // ARG name[=value] @@ -571,7 +574,7 @@ func dispatchStopSignal(d dispatchRequest, c *instructions.StopSignalCommand) er // Adds the variable foo to the trusted list of variables that can be passed // to builder using the --build-arg flag for expansion/substitution or passing to 'run'. // Dockerfile author may optionally set a default value of this variable. -func dispatchArg(d dispatchRequest, c *instructions.ArgCommand) error { +func dispatchArg(ctx context.Context, d dispatchRequest, c *instructions.ArgCommand) error { var commitStr strings.Builder commitStr.WriteString("ARG ") for i, arg := range c.Args { @@ -586,13 +589,13 @@ func dispatchArg(d dispatchRequest, c *instructions.ArgCommand) error { d.state.buildArgs.AddArg(arg.Key, arg.Value) } - return d.builder.commit(d.state, commitStr.String()) + return d.builder.commit(ctx, d.state, commitStr.String()) } // SHELL powershell -command // // Set the non-default shell to use. 
-func dispatchShell(d dispatchRequest, c *instructions.ShellCommand) error { +func dispatchShell(ctx context.Context, d dispatchRequest, c *instructions.ShellCommand) error { d.state.runConfig.Shell = c.Shell - return d.builder.commit(d.state, fmt.Sprintf("SHELL %v", d.state.runConfig.Shell)) + return d.builder.commit(ctx, d.state, fmt.Sprintf("SHELL %v", d.state.runConfig.Shell)) } diff --git a/builder/dockerfile/dispatchers_test.go b/builder/dockerfile/dispatchers_test.go index 2c543f60e59d4..aae002392aa2f 100644 --- a/builder/dockerfile/dispatchers_test.go +++ b/builder/dockerfile/dispatchers_test.go @@ -37,7 +37,7 @@ func newBuilderWithMockBackend() *Builder { Options: opts, Backend: mockBackend, }), - imageProber: newImageProber(mockBackend, nil, false), + imageProber: newImageProber(context.TODO(), mockBackend, nil, false), containerManager: newContainerManager(mockBackend), } return b @@ -52,7 +52,7 @@ func TestEnv2Variables(t *testing.T) { instructions.KeyValuePair{Key: "var2", Value: "val2"}, }, } - err := dispatch(sb, envCommand) + err := dispatch(context.TODO(), sb, envCommand) assert.NilError(t, err) expected := []string{ @@ -71,7 +71,7 @@ func TestEnvValueWithExistingRunConfigEnv(t *testing.T) { instructions.KeyValuePair{Key: "var1", Value: "val1"}, }, } - err := dispatch(sb, envCommand) + err := dispatch(context.TODO(), sb, envCommand) assert.NilError(t, err) expected := []string{ "var1=val1", @@ -85,7 +85,7 @@ func TestMaintainer(t *testing.T) { b := newBuilderWithMockBackend() sb := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) cmd := &instructions.MaintainerCommand{Maintainer: maintainerEntry} - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Check(t, is.Equal(maintainerEntry, sb.state.maintainer)) } @@ -101,7 +101,7 @@ func TestLabel(t *testing.T) { instructions.KeyValuePair{Key: labelName, Value: labelValue}, }, } - err := dispatch(sb, cmd) + 
err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Assert(t, is.Contains(sb.state.runConfig.Labels, labelName)) @@ -114,7 +114,7 @@ func TestFromScratch(t *testing.T) { cmd := &instructions.Stage{ BaseName: "scratch", } - err := initializeStage(sb, cmd) + err := initializeStage(context.TODO(), sb, cmd) if runtime.GOOS == "windows" { assert.Check(t, is.Error(err, "Windows does not support FROM scratch")) @@ -151,7 +151,7 @@ func TestFromWithArg(t *testing.T) { sb := newDispatchRequest(b, '\\', nil, args, newStagesBuildResults()) assert.NilError(t, err) - err = initializeStage(sb, cmd) + err = initializeStage(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Check(t, is.Equal(expected, sb.state.imageID)) @@ -172,7 +172,7 @@ func TestFromWithArgButBuildArgsNotGiven(t *testing.T) { sb := newDispatchRequest(b, '\\', nil, args, newStagesBuildResults()) assert.NilError(t, err) - err = initializeStage(sb, cmd) + err = initializeStage(context.TODO(), sb, cmd) assert.Error(t, err, "base name (${THETAG}) should not be blank") } @@ -192,7 +192,7 @@ func TestFromWithUndefinedArg(t *testing.T) { cmd := &instructions.Stage{ BaseName: "alpine${THETAG}", } - err := initializeStage(sb, cmd) + err := initializeStage(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Check(t, is.Equal(expected, sb.state.imageID)) } @@ -204,12 +204,12 @@ func TestFromMultiStageWithNamedStage(t *testing.T) { previousResults := newStagesBuildResults() firstSB := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), previousResults) secondSB := newDispatchRequest(b, '\\', nil, NewBuildArgs(make(map[string]*string)), previousResults) - err := initializeStage(firstSB, firstFrom) + err := initializeStage(context.TODO(), firstSB, firstFrom) assert.NilError(t, err) assert.Check(t, firstSB.state.hasFromImage()) previousResults.indexed["base"] = firstSB.state.runConfig previousResults.flat = append(previousResults.flat, firstSB.state.runConfig) - err = 
initializeStage(secondSB, secondFrom) + err = initializeStage(context.TODO(), secondSB, secondFrom) assert.NilError(t, err) assert.Check(t, secondSB.state.hasFromImage()) } @@ -220,7 +220,7 @@ func TestOnbuild(t *testing.T) { cmd := &instructions.OnbuildCommand{ Expression: "ADD . /app/src", } - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Check(t, is.Equal("ADD . /app/src", sb.state.runConfig.OnBuild[0])) } @@ -237,7 +237,7 @@ func TestWorkdir(t *testing.T) { Path: workingDir, } - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Check(t, is.Equal(workingDir, sb.state.runConfig.WorkingDir)) } @@ -254,7 +254,7 @@ func TestCmd(t *testing.T) { PrependShell: true, }, } - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) var expectedCommand strslice.StrSlice @@ -276,7 +276,7 @@ func TestHealthcheckNone(t *testing.T) { Test: []string{"NONE"}, }, } - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Assert(t, sb.state.runConfig.Healthcheck != nil) @@ -293,7 +293,7 @@ func TestHealthcheckCmd(t *testing.T) { Test: expectedTest, }, } - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Assert(t, sb.state.runConfig.Healthcheck != nil) @@ -312,7 +312,7 @@ func TestEntrypoint(t *testing.T) { PrependShell: true, }, } - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Assert(t, sb.state.runConfig.Entrypoint != nil) @@ -333,7 +333,7 @@ func TestExpose(t *testing.T) { cmd := &instructions.ExposeCommand{ Ports: []string{exposedPort}, } - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Assert(t, sb.state.runConfig.ExposedPorts != nil) @@ -351,7 +351,7 @@ func TestUser(t *testing.T) { cmd := &instructions.UserCommand{ User: "test", } - err := 
dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Check(t, is.Equal("test", sb.state.runConfig.User)) } @@ -365,7 +365,7 @@ func TestVolume(t *testing.T) { cmd := &instructions.VolumeCommand{ Volumes: []string{exposedVolume}, } - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Assert(t, sb.state.runConfig.Volumes != nil) assert.Check(t, is.Len(sb.state.runConfig.Volumes, 1)) @@ -385,7 +385,7 @@ func TestStopSignal(t *testing.T) { cmd := &instructions.StopSignalCommand{ Signal: signal, } - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) assert.Check(t, is.Equal(signal, sb.state.runConfig.StopSignal)) } @@ -397,7 +397,7 @@ func TestArg(t *testing.T) { argName := "foo" argVal := "bar" cmd := &instructions.ArgCommand{Args: []instructions.KeyValuePairOptional{{Key: argName, Value: &argVal}}} - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) expected := map[string]string{argName: argVal} @@ -411,7 +411,7 @@ func TestShell(t *testing.T) { shellCmd := "powershell" cmd := &instructions.ShellCommand{Shell: strslice.StrSlice{shellCmd}} - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.NilError(t, err) expectedShell := strslice.StrSlice([]string{shellCmd}) @@ -463,7 +463,7 @@ func TestRunWithBuildArgs(t *testing.T) { mockBackend.makeImageCacheFunc = func(_ []string) builder.ImageCache { return imageCache } - b.imageProber = newImageProber(mockBackend, nil, false) + b.imageProber = newImageProber(nil, mockBackend, nil, false) mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ROLayer, error) { return &mockImage{ id: "abcdef", @@ -485,7 +485,7 @@ func TestRunWithBuildArgs(t *testing.T) { return "", nil } from := &instructions.Stage{BaseName: "abcdef"} - err := initializeStage(sb, from) + err := initializeStage(context.TODO(), sb, from) assert.NilError(t, err) 
sb.state.buildArgs.AddArg("one", strPtr("two")) @@ -505,7 +505,7 @@ func TestRunWithBuildArgs(t *testing.T) { runinst.CmdLine = strslice.StrSlice{"echo foo"} runinst.PrependShell = true - assert.NilError(t, dispatch(sb, runinst)) + assert.NilError(t, dispatch(context.TODO(), sb, runinst)) // Check that runConfig.Cmd has not been modified by run assert.Check(t, is.DeepEqual(origCmd, sb.state.runConfig.Cmd)) @@ -529,7 +529,7 @@ func TestRunIgnoresHealthcheck(t *testing.T) { mockBackend.makeImageCacheFunc = func(_ []string) builder.ImageCache { return imageCache } - b.imageProber = newImageProber(mockBackend, nil, false) + b.imageProber = newImageProber(nil, mockBackend, nil, false) mockBackend.getImageFunc = func(_ string) (builder.Image, builder.ROLayer, error) { return &mockImage{ id: "abcdef", @@ -543,7 +543,7 @@ func TestRunIgnoresHealthcheck(t *testing.T) { return "", nil } from := &instructions.Stage{BaseName: "abcdef"} - err := initializeStage(sb, from) + err := initializeStage(context.TODO(), sb, from) assert.NilError(t, err) expectedTest := []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"} @@ -560,7 +560,7 @@ func TestRunIgnoresHealthcheck(t *testing.T) { assert.NilError(t, err) cmd := healthint.(*instructions.HealthCheckCommand) - assert.NilError(t, dispatch(sb, cmd)) + assert.NilError(t, dispatch(context.TODO(), sb, cmd)) assert.Assert(t, sb.state.runConfig.Healthcheck != nil) mockBackend.containerCreateFunc = func(config types.ContainerCreateConfig) (container.CreateResponse, error) { @@ -575,7 +575,7 @@ func TestRunIgnoresHealthcheck(t *testing.T) { run := runint.(*instructions.RunCommand) run.PrependShell = true - assert.NilError(t, dispatch(sb, run)) + assert.NilError(t, dispatch(context.TODO(), sb, run)) assert.Check(t, is.DeepEqual(expectedTest, sb.state.runConfig.Healthcheck.Test)) } @@ -593,7 +593,7 @@ func TestDispatchUnsupportedOptions(t *testing.T) { }, Chmod: "0655", } - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, 
cmd) assert.Error(t, err, "the --chmod option requires BuildKit. Refer to https://docs.docker.com/go/buildkit/ to learn how to build images with BuildKit enabled") }) @@ -605,7 +605,7 @@ func TestDispatchUnsupportedOptions(t *testing.T) { }, Chmod: "0655", } - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.Error(t, err, "the --chmod option requires BuildKit. Refer to https://docs.docker.com/go/buildkit/ to learn how to build images with BuildKit enabled") }) @@ -619,7 +619,7 @@ func TestDispatchUnsupportedOptions(t *testing.T) { // one or more of these flags will be supported in future for _, f := range []string{"mount", "network", "security", "any-flag"} { cmd.FlagsUsed = []string{f} - err := dispatch(sb, cmd) + err := dispatch(context.TODO(), sb, cmd) assert.Error(t, err, fmt.Sprintf("the --%s option requires BuildKit. Refer to https://docs.docker.com/go/buildkit/ to learn how to build images with BuildKit enabled", f)) } }) diff --git a/builder/dockerfile/evaluator.go b/builder/dockerfile/evaluator.go index 1201eb320b4ea..f03a0490f9914 100644 --- a/builder/dockerfile/evaluator.go +++ b/builder/dockerfile/evaluator.go @@ -20,6 +20,7 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( + "context" "reflect" "strconv" "strings" @@ -34,7 +35,7 @@ import ( "github.com/pkg/errors" ) -func dispatch(d dispatchRequest, cmd instructions.Command) (err error) { +func dispatch(ctx context.Context, d dispatchRequest, cmd instructions.Command) (err error) { if c, ok := cmd.(instructions.PlatformSpecific); ok { err := c.CheckPlatform(d.state.operatingSystem) if err != nil { @@ -65,39 +66,39 @@ func dispatch(d dispatchRequest, cmd instructions.Command) (err error) { }() switch c := cmd.(type) { case *instructions.EnvCommand: - return dispatchEnv(d, c) + return dispatchEnv(ctx, d, c) case *instructions.MaintainerCommand: - return dispatchMaintainer(d, c) + return dispatchMaintainer(ctx, d, c) case 
*instructions.LabelCommand: - return dispatchLabel(d, c) + return dispatchLabel(ctx, d, c) case *instructions.AddCommand: - return dispatchAdd(d, c) + return dispatchAdd(ctx, d, c) case *instructions.CopyCommand: - return dispatchCopy(d, c) + return dispatchCopy(ctx, d, c) case *instructions.OnbuildCommand: - return dispatchOnbuild(d, c) + return dispatchOnbuild(ctx, d, c) case *instructions.WorkdirCommand: - return dispatchWorkdir(d, c) + return dispatchWorkdir(ctx, d, c) case *instructions.RunCommand: - return dispatchRun(d, c) + return dispatchRun(ctx, d, c) case *instructions.CmdCommand: - return dispatchCmd(d, c) + return dispatchCmd(ctx, d, c) case *instructions.HealthCheckCommand: - return dispatchHealthcheck(d, c) + return dispatchHealthcheck(ctx, d, c) case *instructions.EntrypointCommand: - return dispatchEntrypoint(d, c) + return dispatchEntrypoint(ctx, d, c) case *instructions.ExposeCommand: - return dispatchExpose(d, c, envs) + return dispatchExpose(ctx, d, c, envs) case *instructions.UserCommand: - return dispatchUser(d, c) + return dispatchUser(ctx, d, c) case *instructions.VolumeCommand: - return dispatchVolume(d, c) + return dispatchVolume(ctx, d, c) case *instructions.StopSignalCommand: - return dispatchStopSignal(d, c) + return dispatchStopSignal(ctx, d, c) case *instructions.ArgCommand: - return dispatchArg(d, c) + return dispatchArg(ctx, d, c) case *instructions.ShellCommand: - return dispatchShell(d, c) + return dispatchShell(ctx, d, c) } return errors.Errorf("unsupported command type: %v", reflect.TypeOf(cmd)) } diff --git a/builder/dockerfile/evaluator_test.go b/builder/dockerfile/evaluator_test.go index 27e01954d0bf2..e151559ad9bb7 100644 --- a/builder/dockerfile/evaluator_test.go +++ b/builder/dockerfile/evaluator_test.go @@ -1,6 +1,7 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( + ctx "context" "os" "runtime" "testing" @@ -129,7 +130,7 @@ func TestDispatch(t *testing.T) { b := 
newBuilderWithMockBackend() sb := newDispatchRequest(b, '`', context, NewBuildArgs(make(map[string]*string)), newStagesBuildResults()) - err = dispatch(sb, tc.cmd) + err = dispatch(ctx.TODO(), sb, tc.cmd) assert.Check(t, is.ErrorContains(err, tc.expectedError)) }) } diff --git a/builder/dockerfile/imageprobe.go b/builder/dockerfile/imageprobe.go index 6960bf8897360..028b08c3eaeb6 100644 --- a/builder/dockerfile/imageprobe.go +++ b/builder/dockerfile/imageprobe.go @@ -1,6 +1,8 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( + "context" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/builder" "github.com/sirupsen/logrus" @@ -19,13 +21,13 @@ type imageProber struct { cacheBusted bool } -func newImageProber(cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, noCache bool) ImageProber { +func newImageProber(ctx context.Context, cacheBuilder builder.ImageCacheBuilder, cacheFrom []string, noCache bool) ImageProber { if noCache { return &nopProber{} } reset := func() builder.ImageCache { - return cacheBuilder.MakeImageCache(cacheFrom) + return cacheBuilder.MakeImageCache(ctx, cacheFrom) } return &imageProber{cache: reset(), reset: reset} } diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index 1bc445d383cd0..544b977f240f2 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -4,6 +4,7 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" // non-contiguous functionality. Please read the comments. 
import ( + "context" "crypto/sha256" "encoding/hex" "fmt" @@ -72,7 +73,7 @@ func (b *Builder) getArchiver(src, dst containerfs.Driver) Archiver { } } -func (b *Builder) commit(dispatchState *dispatchState, comment string) error { +func (b *Builder) commit(ctx context.Context, dispatchState *dispatchState, comment string) error { if b.disableCommit { return nil } @@ -81,7 +82,7 @@ func (b *Builder) commit(dispatchState *dispatchState, comment string) error { } runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, dispatchState.operatingSystem)) - id, err := b.probeAndCreate(dispatchState, runConfigWithCommentCmd) + id, err := b.probeAndCreate(ctx, dispatchState, runConfigWithCommentCmd) if err != nil || id == "" { return err } @@ -152,7 +153,7 @@ func (b *Builder) exportImage(state *dispatchState, layer builder.RWLayer, paren return nil } -func (b *Builder) performCopy(req dispatchRequest, inst copyInstruction) error { +func (b *Builder) performCopy(ctx context.Context, req dispatchRequest, inst copyInstruction) error { state := req.state srcHash := getSourceHashFromInfos(inst.infos) @@ -192,7 +193,7 @@ func (b *Builder) performCopy(req dispatchRequest, inst copyInstruction) error { // translated (if necessary because of user namespaces), and replace // the root pair with the chown pair for copy operations if inst.chownStr != "" { - identity, err = parseChownFlag(b, state, inst.chownStr, destInfo.root.Path(), b.idMapping) + identity, err = parseChownFlag(ctx, b, state, inst.chownStr, destInfo.root.Path(), b.idMapping) if err != nil { if b.options.Platform != "windows" { return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping") @@ -376,18 +377,18 @@ func (b *Builder) probeCache(dispatchState *dispatchState, runConfig *container. 
var defaultLogConfig = container.LogConfig{Type: "none"} -func (b *Builder) probeAndCreate(dispatchState *dispatchState, runConfig *container.Config) (string, error) { +func (b *Builder) probeAndCreate(ctx context.Context, dispatchState *dispatchState, runConfig *container.Config) (string, error) { if hit, err := b.probeCache(dispatchState, runConfig); err != nil || hit { return "", err } - return b.create(runConfig) + return b.create(ctx, runConfig) } -func (b *Builder) create(runConfig *container.Config) (string, error) { +func (b *Builder) create(ctx context.Context, runConfig *container.Config) (string, error) { logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd) hostConfig := hostConfigFromOptions(b.options) - container, err := b.containerManager.Create(runConfig, hostConfig) + container, err := b.containerManager.Create(ctx, runConfig, hostConfig) if err != nil { return "", err } diff --git a/builder/dockerfile/internals_linux.go b/builder/dockerfile/internals_linux.go index d4c714241fa1e..37e096590f479 100644 --- a/builder/dockerfile/internals_linux.go +++ b/builder/dockerfile/internals_linux.go @@ -1,6 +1,7 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( + "context" "path/filepath" "strconv" "strings" @@ -11,7 +12,7 @@ import ( "github.com/pkg/errors" ) -func parseChownFlag(builder *Builder, state *dispatchState, chown, ctrRootPath string, identityMapping idtools.IdentityMapping) (idtools.Identity, error) { +func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState, chown, ctrRootPath string, identityMapping idtools.IdentityMapping) (idtools.Identity, error) { var userStr, grpStr string parts := strings.Split(chown, ":") if len(parts) > 2 { diff --git a/builder/dockerfile/internals_linux_test.go b/builder/dockerfile/internals_linux_test.go index 75af92ab5f91e..6f56d43afcb52 100644 --- a/builder/dockerfile/internals_linux_test.go +++ b/builder/dockerfile/internals_linux_test.go @@ 
-1,6 +1,7 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( + "context" "os" "path/filepath" "testing" @@ -115,7 +116,7 @@ othergrp:x:6666: }, } { t.Run(testcase.name, func(t *testing.T) { - idPair, err := parseChownFlag(testcase.builder, testcase.state, testcase.chownStr, contextDir, testcase.idMapping) + idPair, err := parseChownFlag(context.TODO(), testcase.builder, testcase.state, testcase.chownStr, contextDir, testcase.idMapping) assert.NilError(t, err, "Failed to parse chown flag: %q", testcase.chownStr) assert.Check(t, is.DeepEqual(testcase.expected, idPair), "chown flag mapping failure") }) @@ -156,7 +157,7 @@ othergrp:x:6666: }, } { t.Run(testcase.name, func(t *testing.T) { - _, err := parseChownFlag(testcase.builder, testcase.state, testcase.chownStr, contextDir, testcase.idMapping) + _, err := parseChownFlag(context.TODO(), testcase.builder, testcase.state, testcase.chownStr, contextDir, testcase.idMapping) assert.Check(t, is.Error(err, testcase.descr), "Expected error string doesn't match") }) } diff --git a/builder/dockerfile/internals_windows.go b/builder/dockerfile/internals_windows.go index 335f87cdc7b0f..4bdb54f04dd37 100644 --- a/builder/dockerfile/internals_windows.go +++ b/builder/dockerfile/internals_windows.go @@ -2,6 +2,7 @@ package dockerfile // import "github.com/docker/docker/builder/dockerfile" import ( "bytes" + "context" "os" "path/filepath" "strings" @@ -14,15 +15,15 @@ import ( "golang.org/x/sys/windows" ) -func parseChownFlag(builder *Builder, state *dispatchState, chown, ctrRootPath string, identityMapping idtools.IdentityMapping) (idtools.Identity, error) { +func parseChownFlag(ctx context.Context, builder *Builder, state *dispatchState, chown, ctrRootPath string, identityMapping idtools.IdentityMapping) (idtools.Identity, error) { if builder.options.Platform == "windows" { - return getAccountIdentity(builder, chown, ctrRootPath, state) + return getAccountIdentity(ctx, builder, chown, 
ctrRootPath, state) } return identityMapping.RootPair(), nil } -func getAccountIdentity(builder *Builder, accountName string, ctrRootPath string, state *dispatchState) (idtools.Identity, error) { +func getAccountIdentity(ctx context.Context, builder *Builder, accountName string, ctrRootPath string, state *dispatchState) (idtools.Identity, error) { // If this is potentially a string SID then attempt to convert it to verify // this, otherwise continue looking for the account. if strings.HasPrefix(accountName, "S-") || strings.HasPrefix(accountName, "s-") { @@ -51,10 +52,10 @@ func getAccountIdentity(builder *Builder, accountName string, ctrRootPath string // All other lookups failed, so therefore determine if the account in // question exists in the container and if so, obtain its SID. - return lookupNTAccount(builder, accountName, state) + return lookupNTAccount(ctx, builder, accountName, state) } -func lookupNTAccount(builder *Builder, accountName string, state *dispatchState) (idtools.Identity, error) { +func lookupNTAccount(ctx context.Context, builder *Builder, accountName string, state *dispatchState) (idtools.Identity, error) { source, _ := filepath.Split(os.Args[0]) @@ -81,7 +82,7 @@ func lookupNTAccount(builder *Builder, accountName string, state *dispatchState) }, } - container, err := builder.containerManager.Create(runConfig, hostConfig) + container, err := builder.containerManager.Create(ctx, runConfig, hostConfig) if err != nil { return idtools.Identity{}, err } diff --git a/builder/dockerfile/mockbackend_test.go b/builder/dockerfile/mockbackend_test.go index 0310374a69208..da77054521c9e 100644 --- a/builder/dockerfile/mockbackend_test.go +++ b/builder/dockerfile/mockbackend_test.go @@ -28,7 +28,7 @@ func (m *MockBackend) ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout return nil } -func (m *MockBackend) ContainerCreateIgnoreImagesArgsEscaped(config types.ContainerCreateConfig) (container.CreateResponse, error) { +func (m *MockBackend) 
ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context, config types.ContainerCreateConfig) (container.CreateResponse, error) { if m.containerCreateFunc != nil { return m.containerCreateFunc(config) } @@ -50,7 +50,7 @@ func (m *MockBackend) ContainerKill(containerID string, sig string) error { return nil } -func (m *MockBackend) ContainerStart(containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error { +func (m *MockBackend) ContainerStart(ctx context.Context, containerID string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error { return nil } @@ -74,7 +74,7 @@ func (m *MockBackend) GetImageAndReleasableLayer(ctx context.Context, refOrID st return &mockImage{id: "theid"}, &mockLayer{}, nil } -func (m *MockBackend) MakeImageCache(cacheFrom []string) builder.ImageCache { +func (m *MockBackend) MakeImageCache(ctx context.Context, cacheFrom []string) builder.ImageCache { if m.makeImageCacheFunc != nil { return m.makeImageCacheFunc(cacheFrom) } diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index 7d3dc6ae1abd4..c1d739891f3ff 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -253,7 +253,7 @@ func (cli *DaemonCli) start(opts *daemonOptions) (err error) { // notify systemd that we're shutting down notifyStopping() - shutdownDaemon(d) + shutdownDaemon(ctx, d) // Stop notification processing and any background processes cancel() @@ -361,11 +361,11 @@ func (cli *DaemonCli) stop() { // shutdownDaemon just wraps daemon.Shutdown() to handle a timeout in case // d.Shutdown() is waiting too long to kill container or worst it's // blocked there -func shutdownDaemon(d *daemon.Daemon) { +func shutdownDaemon(ctx context.Context, d *daemon.Daemon) { shutdownTimeout := d.ShutdownTimeout() ch := make(chan struct{}) go func() { - d.Shutdown() + d.Shutdown(ctx) close(ch) }() if shutdownTimeout < 0 { diff --git a/daemon/cluster/executor/backend.go b/daemon/cluster/executor/backend.go 
index 280c030106880..16ff7bf8dc717 100644 --- a/daemon/cluster/executor/backend.go +++ b/daemon/cluster/executor/backend.go @@ -36,8 +36,8 @@ type Backend interface { FindNetwork(idName string) (libnetwork.Network, error) SetupIngress(clustertypes.NetworkCreateRequest, string) (<-chan struct{}, error) ReleaseIngress() (<-chan struct{}, error) - CreateManagedContainer(config types.ContainerCreateConfig) (container.CreateResponse, error) - ContainerStart(name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error + CreateManagedContainer(ctx context.Context, config types.ContainerCreateConfig) (container.CreateResponse, error) + ContainerStart(ctx context.Context, name string, hostConfig *container.HostConfig, checkpoint string, checkpointDir string) error ContainerStop(ctx context.Context, name string, config container.StopOptions) error ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error @@ -52,7 +52,7 @@ type Backend interface { SetContainerSecretReferences(name string, refs []*swarm.SecretReference) error SetContainerConfigReferences(name string, refs []*swarm.ConfigReference) error SystemInfo() *types.Info - Containers(config *types.ContainerListOptions) ([]*types.Container, error) + Containers(ctx context.Context, config *types.ContainerListOptions) ([]*types.Container, error) SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error DaemonJoinsCluster(provider cluster.Provider) DaemonLeavesCluster() @@ -76,5 +76,5 @@ type VolumeBackend interface { type ImageBackend interface { PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error GetRepository(context.Context, reference.Named, *registry.AuthConfig) 
(distribution.Repository, error) - GetImage(refOrID string, platform *specs.Platform) (retImg *image.Image, retErr error) + GetImage(ctx context.Context, refOrID string, platform *specs.Platform) (retImg *image.Image, retErr error) } diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go index 979e788fe4def..bf0dabf08837f 100644 --- a/daemon/cluster/executor/container/adapter.go +++ b/daemon/cluster/executor/container/adapter.go @@ -75,7 +75,7 @@ func (c *containerAdapter) pullImage(ctx context.Context) error { named, err := reference.ParseNormalizedNamed(spec.Image) if err == nil { if _, ok := named.(reference.Canonical); ok { - _, err := c.imageBackend.GetImage(spec.Image, nil) + _, err := c.imageBackend.GetImage(ctx, spec.Image, nil) if err == nil { return nil } @@ -286,7 +286,7 @@ func (c *containerAdapter) waitForDetach(ctx context.Context) error { func (c *containerAdapter) create(ctx context.Context) error { var cr containertypes.CreateResponse var err error - if cr, err = c.backend.CreateManagedContainer(types.ContainerCreateConfig{ + if cr, err = c.backend.CreateManagedContainer(ctx, types.ContainerCreateConfig{ Name: c.container.name(), Config: c.container.config(), HostConfig: c.container.hostConfig(c.dependencies.Volumes()), @@ -353,7 +353,7 @@ func (c *containerAdapter) start(ctx context.Context) error { return err } - return c.backend.ContainerStart(c.container.name(), nil, "", "") + return c.backend.ContainerStart(ctx, c.container.name(), nil, "", "") } func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) { diff --git a/daemon/cluster/swarm.go b/daemon/cluster/swarm.go index 99d6ce17a360b..23478c8cd7dbf 100644 --- a/daemon/cluster/swarm.go +++ b/daemon/cluster/swarm.go @@ -356,7 +356,7 @@ func (c *Cluster) UnlockSwarm(req types.UnlockRequest) error { } // Leave shuts down Cluster and removes current state. 
-func (c *Cluster) Leave(force bool) error { +func (c *Cluster) Leave(ctx context.Context, force bool) error { c.controlMutex.Lock() defer c.controlMutex.Unlock() @@ -408,7 +408,7 @@ func (c *Cluster) Leave(force bool) error { c.mu.Unlock() if nodeID := state.NodeID(); nodeID != "" { - nodeContainers, err := c.listContainerForNode(nodeID) + nodeContainers, err := c.listContainerForNode(ctx, nodeID) if err != nil { return err } @@ -604,11 +604,11 @@ func initClusterSpec(node *swarmnode.Node, spec types.Spec) error { return ctx.Err() } -func (c *Cluster) listContainerForNode(nodeID string) ([]string, error) { +func (c *Cluster) listContainerForNode(ctx context.Context, nodeID string) ([]string, error) { var ids []string filters := filters.NewArgs() filters.Add("label", fmt.Sprintf("com.docker.swarm.node.id=%s", nodeID)) - containers, err := c.config.Backend.Containers(&apitypes.ContainerListOptions{ + containers, err := c.config.Backend.Containers(ctx, &apitypes.ContainerListOptions{ Filters: filters, }) if err != nil { diff --git a/daemon/commit.go b/daemon/commit.go index 302e9a95d6fe5..7b7d4e4446391 100644 --- a/daemon/commit.go +++ b/daemon/commit.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "runtime" "strings" @@ -116,7 +117,7 @@ func merge(userConf, imageConf *containertypes.Config) error { // CreateImageFromContainer creates a new image from a container. The container // config will be updated by applying the change set to the custom config, then // applying that config over the existing container config. 
-func (daemon *Daemon) CreateImageFromContainer(name string, c *backend.CreateImageConfig) (string, error) { +func (daemon *Daemon) CreateImageFromContainer(ctx context.Context, name string, c *backend.CreateImageConfig) (string, error) { start := time.Now() container, err := daemon.GetContainer(name) if err != nil { @@ -146,7 +147,7 @@ func (daemon *Daemon) CreateImageFromContainer(name string, c *backend.CreateIma if c.Config == nil { c.Config = container.Config } - newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes, container.OS) + newConfig, err := dockerfile.BuildFromConfig(ctx, c.Config, c.Changes, container.OS) if err != nil { return "", err } diff --git a/daemon/containerd/cache.go b/daemon/containerd/cache.go index c035025502244..62cdaa6c1fe70 100644 --- a/daemon/containerd/cache.go +++ b/daemon/containerd/cache.go @@ -1,10 +1,12 @@ package containerd import ( + "context" + "github.com/docker/docker/builder" ) // MakeImageCache creates a stateful image cache. -func (i *ImageService) MakeImageCache(cacheFrom []string) builder.ImageCache { +func (i *ImageService) MakeImageCache(ctx context.Context, cacheFrom []string) builder.ImageCache { panic("not implemented") } diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index e4022b204eacb..33ae73a5f21e7 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -1,11 +1,222 @@ package containerd import ( + "context" + "encoding/json" + "fmt" + "regexp" + + "github.com/containerd/containerd" + "github.com/containerd/containerd/content" + cerrdefs "github.com/containerd/containerd/errdefs" + containerdimages "github.com/containerd/containerd/images" + "github.com/docker/distribution/reference" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/errdefs" "github.com/docker/docker/image" - specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/opencontainers/go-digest" + ocispec 
"github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) +var shortID = regexp.MustCompile(`^([a-f0-9]{4,64})$`) + // GetImage returns an image corresponding to the image referred to by refOrID. -func (i *ImageService) GetImage(refOrID string, platform *specs.Platform) (retImg *image.Image, retErr error) { - panic("not implemented") +func (i *ImageService) GetImage(ctx context.Context, refOrID string, platform *ocispec.Platform) (*image.Image, error) { + desc, err := i.ResolveImage(ctx, refOrID) + if err != nil { + return nil, err + } + + ctrdimg, err := i.resolveImageName2(ctx, refOrID) + if err != nil { + return nil, err + } + ii := containerd.NewImage(i.client, ctrdimg) + provider := i.client.ContentStore() + conf, err := ctrdimg.Config(ctx, provider, ii.Platform()) + if err != nil { + return nil, err + } + + var ociimage ocispec.Image + imageConfigBytes, err := content.ReadBlob(ctx, ii.ContentStore(), conf) + if err != nil { + return nil, err + } + + if err := json.Unmarshal(imageConfigBytes, &ociimage); err != nil { + return nil, err + } + + return &image.Image{ + V1Image: image.V1Image{ + ID: string(desc.Digest), + OS: ociimage.OS, + Architecture: ociimage.Architecture, + Config: &containertypes.Config{ + Entrypoint: ociimage.Config.Entrypoint, + Env: ociimage.Config.Env, + Cmd: ociimage.Config.Cmd, + User: ociimage.Config.User, + WorkingDir: ociimage.Config.WorkingDir, + }, + }, + }, nil +} + +// ResolveImage searches for an image based on the given +// reference or identifier. Returns the descriptor of +// the image, could be manifest list, manifest, or config. 
+func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (d ocispec.Descriptor, err error) { + d, _, err = i.resolveImageName(ctx, refOrID) + return +} + +func (i *ImageService) resolveImageName2(ctx context.Context, refOrID string) (img containerdimages.Image, err error) { + parsed, err := reference.ParseAnyReference(refOrID) + if err != nil { + return img, errdefs.InvalidParameter(err) + } + + is := i.client.ImageService() + + namedRef, ok := parsed.(reference.Named) + if !ok { + digested, ok := parsed.(reference.Digested) + if !ok { + return img, errdefs.InvalidParameter(errors.New("bad reference")) + } + + imgs, err := is.List(ctx, fmt.Sprintf("target.digest==%s", digested.Digest())) + if err != nil { + return img, errors.Wrap(err, "failed to lookup digest") + } + if len(imgs) == 0 { + return img, errdefs.NotFound(errors.New("image not found with digest")) + } + + return imgs[0], nil + } + + namedRef = reference.TagNameOnly(namedRef) + + // If the identifier could be a short ID, attempt to match + if shortID.MatchString(refOrID) { + ref := namedRef.String() + filters := []string{ + fmt.Sprintf("name==%q", ref), + fmt.Sprintf(`target.digest~=/sha256:%s[0-9a-fA-F]{%d}/`, refOrID, 64-len(refOrID)), + } + imgs, err := is.List(ctx, filters...) 
+ if err != nil { + return img, err + } + + if len(imgs) == 0 { + return img, errdefs.NotFound(errors.New("list returned no images")) + } + if len(imgs) > 1 { + digests := map[digest.Digest]struct{}{} + for _, img := range imgs { + if img.Name == ref { + return img, nil + } + digests[img.Target.Digest] = struct{}{} + } + + if len(digests) > 1 { + return img, errdefs.NotFound(errors.New("ambiguous reference")) + } + } + + if imgs[0].Name != ref { + namedRef = nil + } + return imgs[0], nil + } + img, err = is.Get(ctx, namedRef.String()) + if err != nil { + // TODO(containerd): error translation can use common function + if !cerrdefs.IsNotFound(err) { + return img, err + } + return img, errdefs.NotFound(errors.New("id not found")) + } + + return img, nil +} + +func (i *ImageService) resolveImageName(ctx context.Context, refOrID string) (ocispec.Descriptor, reference.Named, error) { + parsed, err := reference.ParseAnyReference(refOrID) + if err != nil { + return ocispec.Descriptor{}, nil, errdefs.InvalidParameter(err) + } + + is := i.client.ImageService() + + namedRef, ok := parsed.(reference.Named) + if !ok { + digested, ok := parsed.(reference.Digested) + if !ok { + return ocispec.Descriptor{}, nil, errdefs.InvalidParameter(errors.New("bad reference")) + } + + imgs, err := is.List(ctx, fmt.Sprintf("target.digest==%s", digested.Digest())) + if err != nil { + return ocispec.Descriptor{}, nil, errors.Wrap(err, "failed to lookup digest") + } + if len(imgs) == 0 { + return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("image not found with digest")) + } + + return imgs[0].Target, nil, nil + } + + namedRef = reference.TagNameOnly(namedRef) + + // If the identifier could be a short ID, attempt to match + if shortID.MatchString(refOrID) { + ref := namedRef.String() + filters := []string{ + fmt.Sprintf("name==%q", ref), + fmt.Sprintf(`target.digest~=/sha256:%s[0-9a-fA-F]{%d}/`, refOrID, 64-len(refOrID)), + } + imgs, err := is.List(ctx, filters...) 
+ if err != nil { + return ocispec.Descriptor{}, nil, err + } + + if len(imgs) == 0 { + return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("list returned no images")) + } + if len(imgs) > 1 { + digests := map[digest.Digest]struct{}{} + for _, img := range imgs { + if img.Name == ref { + return img.Target, namedRef, nil + } + digests[img.Target.Digest] = struct{}{} + } + + if len(digests) > 1 { + return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("ambiguous reference")) + } + } + + if imgs[0].Name != ref { + namedRef = nil + } + return imgs[0].Target, namedRef, nil + } + img, err := is.Get(ctx, namedRef.String()) + if err != nil { + // TODO(containerd): error translation can use common function + if !cerrdefs.IsNotFound(err) { + return ocispec.Descriptor{}, nil, err + } + return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("id not found")) + } + + return img.Target, namedRef, nil } diff --git a/daemon/containerd/image_import.go b/daemon/containerd/image_import.go index 44d70f5e67556..9e28bfe5cf6a7 100644 --- a/daemon/containerd/image_import.go +++ b/daemon/containerd/image_import.go @@ -1,6 +1,7 @@ package containerd import ( + "context" "io" specs "github.com/opencontainers/image-spec/specs-go/v1" @@ -10,6 +11,6 @@ import ( // inConfig (if src is "-"), or from a URI specified in src. Progress output is // written to outStream. Repository and tag names can optionally be given in // the repo and tag arguments, respectively. 
-func (i *ImageService) ImportImage(src string, repository string, platform *specs.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { +func (i *ImageService) ImportImage(ctx context.Context, src string, repository string, platform *specs.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { panic("not implemented") } diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index 2604daec299e9..a2bf22a981458 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -83,7 +83,7 @@ func (i *ImageService) Cleanup() error { // - newContainer // - to report an error in Daemon.Mount(container) func (i *ImageService) GraphDriverName() string { - return "" + return "containerd-snapshotter" } // ReleaseLayer releases a layer allowing it to be removed diff --git a/daemon/create.go b/daemon/create.go index 4d44d01ca6e77..d2189c9607f03 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "net" "runtime" @@ -32,16 +33,16 @@ type createOpts struct { } // CreateManagedContainer creates a container that is managed by a Service -func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig) (containertypes.CreateResponse, error) { - return daemon.containerCreate(createOpts{ +func (daemon *Daemon) CreateManagedContainer(ctx context.Context, params types.ContainerCreateConfig) (containertypes.CreateResponse, error) { + return daemon.containerCreate(ctx, createOpts{ params: params, managed: true, ignoreImagesArgsEscaped: false}) } // ContainerCreate creates a regular container -func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (containertypes.CreateResponse, error) { - return daemon.containerCreate(createOpts{ +func (daemon *Daemon) ContainerCreate(ctx context.Context, params types.ContainerCreateConfig) 
(containertypes.CreateResponse, error) { + return daemon.containerCreate(ctx, createOpts{ params: params, managed: false, ignoreImagesArgsEscaped: false}) @@ -49,14 +50,14 @@ func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig) (conta // ContainerCreateIgnoreImagesArgsEscaped creates a regular container. This is called from the builder RUN case // and ensures that we do not take the images ArgsEscaped -func (daemon *Daemon) ContainerCreateIgnoreImagesArgsEscaped(params types.ContainerCreateConfig) (containertypes.CreateResponse, error) { - return daemon.containerCreate(createOpts{ +func (daemon *Daemon) ContainerCreateIgnoreImagesArgsEscaped(ctx context.Context, params types.ContainerCreateConfig) (containertypes.CreateResponse, error) { + return daemon.containerCreate(ctx, createOpts{ params: params, managed: false, ignoreImagesArgsEscaped: true}) } -func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.CreateResponse, error) { +func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (containertypes.CreateResponse, error) { start := time.Now() if opts.params.Config == nil { return containertypes.CreateResponse{}, errdefs.InvalidParameter(errors.New("Config cannot be empty in order to create a container")) @@ -68,7 +69,7 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.CreateRes } if opts.params.Platform == nil && opts.params.Config.Image != "" { - if img, _ := daemon.imageService.GetImage(opts.params.Config.Image, opts.params.Platform); img != nil { + if img, _ := daemon.imageService.GetImage(ctx, opts.params.Config.Image, opts.params.Platform); img != nil { p := maximumSpec() imgPlat := v1.Platform{ OS: img.OS, @@ -95,7 +96,7 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.CreateRes return containertypes.CreateResponse{Warnings: warnings}, errdefs.InvalidParameter(err) } - ctr, err := daemon.create(opts) + ctr, err := daemon.create(ctx, opts) if err != nil 
{ return containertypes.CreateResponse{Warnings: warnings}, err } @@ -109,7 +110,7 @@ func (daemon *Daemon) containerCreate(opts createOpts) (containertypes.CreateRes } // Create creates a new container from the given configuration with a given name. -func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr error) { +func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *container.Container, retErr error) { var ( ctr *container.Container img *image.Image @@ -119,7 +120,7 @@ func (daemon *Daemon) create(opts createOpts) (retC *container.Container, retErr ) if opts.params.Config.Image != "" { - img, err = daemon.imageService.GetImage(opts.params.Config.Image, opts.params.Platform) + img, err = daemon.imageService.GetImage(ctx, opts.params.Config.Image, opts.params.Platform) if err != nil { return nil, err } diff --git a/daemon/daemon.go b/daemon/daemon.go index e9c3765042279..9a6cb891c5992 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -203,7 +203,7 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts { return resolver.NewRegistryConfig(m) } -func (daemon *Daemon) restore() error { +func (daemon *Daemon) restore(ctx context.Context) error { var mapLock sync.Mutex containers := make(map[string]*container.Container) @@ -524,7 +524,7 @@ func (daemon *Daemon) restore() error { } } - if err := daemon.containerStart(c, "", "", true); err != nil { + if err := daemon.containerStart(ctx, c, "", "", true); err != nil { log.WithError(err).Error("failed to start container") } close(chNotify) @@ -615,7 +615,7 @@ func (daemon *Daemon) RestartSwarmContainers() { return } - if err := daemon.containerStart(c, "", "", true); err != nil { + if err := daemon.containerStart(ctx, c, "", "", true); err != nil { logrus.WithField("container", c.ID).WithError(err).Error("failed to start swarm container") } @@ -779,7 +779,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S // initialization defer 
func() { if err != nil { - if err := d.Shutdown(); err != nil { + if err := d.Shutdown(ctx); err != nil { logrus.Error(err) } } @@ -1102,7 +1102,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, err } - if err := d.restore(); err != nil { + if err := d.restore(ctx); err != nil { return nil, err } close(d.startupDone) @@ -1186,14 +1186,14 @@ func (daemon *Daemon) ShutdownTimeout() int { } // Shutdown stops the daemon. -func (daemon *Daemon) Shutdown() error { +func (daemon *Daemon) Shutdown(ctx context.Context) error { daemon.shutdown = true // Keep mounts and networking running on daemon shutdown if // we are to keep containers running and restore them. if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil { // check if there are any running containers, if none we should do some cleanup - if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { + if ls, err := daemon.Containers(ctx, &types.ContainerListOptions{}); len(ls) != 0 || err != nil { // metrics plugins still need some cleanup daemon.cleanupMetricsPlugins() return nil diff --git a/daemon/disk_usage.go b/daemon/disk_usage.go index 0af893b6055e9..c61234d6547e5 100644 --- a/daemon/disk_usage.go +++ b/daemon/disk_usage.go @@ -14,7 +14,7 @@ import ( func (daemon *Daemon) ContainerDiskUsage(ctx context.Context) ([]*types.Container, error) { ch := daemon.usage.DoChan("ContainerDiskUsage", func() (interface{}, error) { // Retrieve container list - containers, err := daemon.Containers(&types.ContainerListOptions{ + containers, err := daemon.Containers(nil, &types.ContainerListOptions{ Size: true, All: true, }) diff --git a/daemon/image_service.go b/daemon/image_service.go index 2fe5eddc38e1e..e301891cd1a70 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -37,10 +37,10 @@ type ImageService interface { CountImages() int ImageDiskUsage(ctx context.Context) ([]*types.ImageSummary, error) 
ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) - ImportImage(src string, repository string, platform *v1.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error + ImportImage(ctx context.Context, src string, repository string, platform *v1.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error TagImage(imageName, repository, tag string) (string, error) TagImageWithReference(imageID image.ID, newTag reference.Named) error - GetImage(refOrID string, platform *v1.Platform) (retImg *image.Image, retErr error) + GetImage(ctx context.Context, refOrID string, platform *v1.Platform) (*image.Image, error) ImageHistory(name string) ([]*imagetype.HistoryResponseItem, error) CommitImage(c backend.CommitConfig) (image.ID, error) SquashImage(id, parent string) (string, error) @@ -62,7 +62,7 @@ type ImageService interface { // Build - MakeImageCache(sourceRefs []string) builder.ImageCache + MakeImageCache(ctx context.Context, cacheFrom []string) builder.ImageCache CommitBuildStep(c backend.CommitConfig) (image.ID, error) // Other diff --git a/daemon/images/cache.go b/daemon/images/cache.go index 445b1b9261831..000d73211861a 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -1,13 +1,15 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" + "github.com/docker/docker/builder" "github.com/docker/docker/image/cache" "github.com/sirupsen/logrus" ) // MakeImageCache creates a stateful image cache. 
-func (i *ImageService) MakeImageCache(sourceRefs []string) builder.ImageCache { +func (i *ImageService) MakeImageCache(ctx context.Context, sourceRefs []string) builder.ImageCache { if len(sourceRefs) == 0 { return cache.NewLocal(i.imageStore) } @@ -15,7 +17,7 @@ func (i *ImageService) MakeImageCache(sourceRefs []string) builder.ImageCache { cache := cache.New(i.imageStore) for _, ref := range sourceRefs { - img, err := i.GetImage(ref, nil) + img, err := i.GetImage(ctx, ref, nil) if err != nil { logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) continue diff --git a/daemon/images/image.go b/daemon/images/image.go index b6a994cb9dab6..974bf1e7d088b 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -148,7 +148,7 @@ func (i *ImageService) manifestMatchesPlatform(img *image.Image, platform specs. } // GetImage returns an image corresponding to the image referred to by refOrID. -func (i *ImageService) GetImage(refOrID string, platform *specs.Platform) (retImg *image.Image, retErr error) { +func (i *ImageService) GetImage(ctx context.Context, refOrID string, platform *specs.Platform) (retImg *image.Image, retErr error) { defer func() { if retErr != nil || retImg == nil || platform == nil { return diff --git a/daemon/images/image_builder.go b/daemon/images/image_builder.go index a5fa0aa46d6fe..430a589ae9088 100644 --- a/daemon/images/image_builder.go +++ b/daemon/images/image_builder.go @@ -167,7 +167,7 @@ func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConf return nil, err } - img, err := i.GetImage(name, platform) + img, err := i.GetImage(nil, name, platform) if errdefs.IsNotFound(err) && img != nil && platform != nil { imgPlat := specs.Platform{ OS: img.OS, @@ -211,7 +211,7 @@ func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID s } if opts.PullOption != backend.PullOptionForcePull { - img, err := i.GetImage(refOrID, opts.Platform) + img, err := i.GetImage(nil, 
refOrID, opts.Platform) if err != nil && opts.PullOption == backend.PullOptionNoPull { return nil, nil, err } diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index b5e1f7d9c3129..5c543b3e94741 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -63,7 +63,7 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, start := time.Now() records := []types.ImageDeleteResponseItem{} - img, err := i.GetImage(imageRef, nil) + img, err := i.GetImage(nil, imageRef, nil) if err != nil { return nil, err } diff --git a/daemon/images/image_events.go b/daemon/images/image_events.go index 1d8cfcd914dc8..e89a7c49c7e5e 100644 --- a/daemon/images/image_events.go +++ b/daemon/images/image_events.go @@ -11,7 +11,7 @@ func (i *ImageService) LogImageEvent(imageID, refName, action string) { // LogImageEventWithAttributes generates an event related to an image with specific given attributes. func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { - img, err := i.GetImage(imageID, nil) + img, err := i.GetImage(nil, imageID, nil) if err == nil && img.Config != nil { // image has not been removed yet. // it could be missing if the event is `delete`. diff --git a/daemon/images/image_history.go b/daemon/images/image_history.go index c1f48829526dc..b092b2f76f5ff 100644 --- a/daemon/images/image_history.go +++ b/daemon/images/image_history.go @@ -13,7 +13,7 @@ import ( // name by walking the image lineage. 
func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, error) { start := time.Now() - img, err := i.GetImage(name, nil) + img, err := i.GetImage(nil, name, nil) if err != nil { return nil, err } @@ -69,7 +69,7 @@ func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, if id == "" { break } - histImg, err = i.GetImage(id.String(), nil) + histImg, err = i.GetImage(nil, id.String(), nil) if err != nil { break } diff --git a/daemon/images/image_import.go b/daemon/images/image_import.go index d732cf55fadd0..e0d246b52ee91 100644 --- a/daemon/images/image_import.go +++ b/daemon/images/image_import.go @@ -1,6 +1,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" "encoding/json" "io" "net/http" @@ -29,7 +30,7 @@ import ( // inConfig (if src is "-"), or from a URI specified in src. Progress output is // written to outStream. Repository and tag names can optionally be given in // the repo and tag arguments, respectively. 
-func (i *ImageService) ImportImage(src string, repository string, platform *specs.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { +func (i *ImageService) ImportImage(ctx context.Context, src string, repository string, platform *specs.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { var ( rc io.ReadCloser resp *http.Response @@ -62,7 +63,7 @@ func (i *ImageService) ImportImage(src string, repository string, platform *spec if !system.IsOSSupported(platform.OS) { return errdefs.InvalidParameter(system.ErrNotSupportedOperatingSystem) } - config, err := dockerfile.BuildFromConfig(&container.Config{}, changes, platform.OS) + config, err := dockerfile.BuildFromConfig(ctx, &container.Config{}, changes, platform.OS) if err != nil { return err } diff --git a/daemon/images/image_list.go b/daemon/images/image_list.go index e00e6ec5b8b07..343a539672583 100644 --- a/daemon/images/image_list.go +++ b/daemon/images/image_list.go @@ -49,7 +49,7 @@ func (i *ImageService) Images(_ context.Context, opts types.ImageListOptions) ([ err error ) err = opts.Filters.WalkValues("before", func(value string) error { - beforeFilter, err = i.GetImage(value, nil) + beforeFilter, err = i.GetImage(nil, value, nil) return err }) if err != nil { @@ -57,7 +57,7 @@ func (i *ImageService) Images(_ context.Context, opts types.ImageListOptions) ([ } err = opts.Filters.WalkValues("since", func(value string) error { - sinceFilter, err = i.GetImage(value, nil) + sinceFilter, err = i.GetImage(nil, value, nil) return err }) if err != nil { diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index e6efd90fbffe9..18b33d0227405 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -63,7 +63,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tag string, platfor // we allow the image to have a non-matching architecture. 
The code // below checks for this situation, and returns a warning to the client, // as well as logging it to the daemon logs. - img, err := i.GetImage(image, platform) + img, err := i.GetImage(nil, image, platform) // Note that this is a special case where GetImage returns both an image // and an error: https://github.com/docker/docker/blob/v20.10.7/daemon/images/image.go#L175-L183 diff --git a/daemon/images/image_tag.go b/daemon/images/image_tag.go index becd2e2df3d77..7ffe481e7d4b0 100644 --- a/daemon/images/image_tag.go +++ b/daemon/images/image_tag.go @@ -8,7 +8,7 @@ import ( // TagImage creates the tag specified by newTag, pointing to the image named // imageName (alternatively, imageName can also be an image ID). func (i *ImageService) TagImage(imageName, repository, tag string) (string, error) { - img, err := i.GetImage(imageName, nil) + img, err := i.GetImage(nil, imageName, nil) if err != nil { return "", err } diff --git a/daemon/list.go b/daemon/list.go index b1abe9552f80b..d3fadc3490634 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "sort" "strconv" @@ -40,7 +41,7 @@ type iterationAction int // containerReducer represents a reducer for a container. // Returns the object to serialize by the api. -type containerReducer func(*container.Snapshot, *listContext) (*types.Container, error) +type containerReducer func(context.Context, *container.Snapshot, *listContext) (*types.Container, error) const ( // includeContainer is the action to include a container in the reducer. @@ -104,8 +105,8 @@ func (r byCreatedDescending) Less(i, j int) bool { } // Containers returns the list of containers to show given the user's filtering. 
-func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { - return daemon.reduceContainers(config, daemon.refreshImage) +func (daemon *Daemon) Containers(ctx context.Context, config *types.ContainerListOptions) ([]*types.Container, error) { + return daemon.reduceContainers(ctx, config, daemon.refreshImage) } func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContext) ([]container.Snapshot, error) { @@ -175,7 +176,7 @@ func (daemon *Daemon) filterByNameIDMatches(view container.View, ctx *listContex } // reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. -func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { +func (daemon *Daemon) reduceContainers(ctx context.Context, config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { if err := config.Filters.Validate(acceptedPsFilterTags); err != nil { return nil, err } @@ -185,7 +186,7 @@ func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reduc containers = []*types.Container{} ) - ctx, err := daemon.foldFilter(view, config) + filter, err := daemon.foldFilter(ctx, view, config) if err != nil { return nil, err } @@ -193,13 +194,13 @@ func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reduc // fastpath to only look at a subset of containers if specific name // or ID matches were provided by the user--otherwise we potentially // end up querying many more containers than intended - containerList, err := daemon.filterByNameIDMatches(view, ctx) + containerList, err := daemon.filterByNameIDMatches(view, filter) if err != nil { return nil, err } for i := range containerList { - t, err := daemon.reducePsContainer(&containerList[i], ctx, reducer) + t, err := daemon.reducePsContainer(ctx, &containerList[i], filter, reducer) if err != nil { if err 
!= errStopIteration { return nil, err @@ -208,7 +209,7 @@ func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reduc } if t != nil { containers = append(containers, t) - ctx.idx++ + filter.idx++ } } @@ -216,9 +217,9 @@ func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reduc } // reducePsContainer is the basic representation for a container as expected by the ps command. -func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *listContext, reducer containerReducer) (*types.Container, error) { +func (daemon *Daemon) reducePsContainer(ctx context.Context, container *container.Snapshot, filter *listContext, reducer containerReducer) (*types.Container, error) { // filter containers to return - switch includeContainerInList(container, ctx) { + switch includeContainerInList(container, filter) { case excludeContainer: return nil, nil case stopIteration: @@ -226,13 +227,13 @@ func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *list } // transform internal container struct into api structs - newC, err := reducer(container, ctx) + newC, err := reducer(ctx, container, filter) if err != nil { return nil, err } // release lock because size calculation is slow - if ctx.Size { + if filter.Size { sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(newC.ID) newC.SizeRw = sizeRw newC.SizeRootFs = sizeRootFs @@ -241,7 +242,7 @@ func (daemon *Daemon) reducePsContainer(container *container.Snapshot, ctx *list } // foldFilter generates the container filter based on the user's filtering options. 
-func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerListOptions) (*listContext, error) { +func (daemon *Daemon) foldFilter(ctx context.Context, view container.View, config *types.ContainerListOptions) (*listContext, error) { psFilters := config.Filters var filtExited []int @@ -317,7 +318,7 @@ func (daemon *Daemon) foldFilter(view container.View, config *types.ContainerLis if psFilters.Contains("ancestor") { ancestorFilter = true psFilters.WalkValues("ancestor", func(ancestor string) error { - img, err := daemon.imageService.GetImage(ancestor, nil) + img, err := daemon.imageService.GetImage(ctx, ancestor, nil) if err != nil { logrus.Warnf("Error while looking up for image %v", ancestor) return nil @@ -577,11 +578,11 @@ func includeContainerInList(container *container.Snapshot, ctx *listContext) ite } // refreshImage checks if the Image ref still points to the correct ID, and updates the ref to the actual ID when it doesn't -func (daemon *Daemon) refreshImage(s *container.Snapshot, ctx *listContext) (*types.Container, error) { +func (daemon *Daemon) refreshImage(ctx context.Context, s *container.Snapshot, filter *listContext) (*types.Container, error) { c := s.Container image := s.Image // keep the original ref if still valid (hasn't changed) if image != s.ImageID { - img, err := daemon.imageService.GetImage(image, nil) + img, err := daemon.imageService.GetImage(ctx, image, nil) if _, isDNE := err.(images.ErrImageDoesNotExist); err != nil && !isDNE { return nil, err } diff --git a/daemon/list_test.go b/daemon/list_test.go index 955d137aa1a21..15e38abd0dbc5 100644 --- a/daemon/list_test.go +++ b/daemon/list_test.go @@ -1,6 +1,7 @@ package daemon import ( + "context" "os" "path/filepath" "testing" @@ -88,7 +89,7 @@ func TestListInvalidFilter(t *testing.T) { f := filters.NewArgs(filters.Arg("invalid", "foo")) - _, err = d.Containers(&types.ContainerListOptions{ + _, err = d.Containers(context.Background(), &types.ContainerListOptions{ Filters: 
f, }) assert.Assert(t, is.Error(err, "invalid filter 'invalid'")) @@ -109,7 +110,7 @@ func TestNameFilter(t *testing.T) { // moby/moby #37453 - ^ regex not working due to prefix slash // not being stripped - containerList, err := d.Containers(&types.ContainerListOptions{ + containerList, err := d.Containers(context.Background(), &types.ContainerListOptions{ Filters: filters.NewArgs(filters.Arg("name", "^a")), }) assert.NilError(t, err) @@ -118,7 +119,7 @@ func TestNameFilter(t *testing.T) { assert.Assert(t, containerListContainsName(containerList, two.Name)) // Same as above but with slash prefix should produce the same result - containerListWithPrefix, err := d.Containers(&types.ContainerListOptions{ + containerListWithPrefix, err := d.Containers(context.Background(), &types.ContainerListOptions{ Filters: filters.NewArgs(filters.Arg("name", "^/a")), }) assert.NilError(t, err) @@ -127,7 +128,7 @@ func TestNameFilter(t *testing.T) { assert.Assert(t, containerListContainsName(containerListWithPrefix, two.Name)) // Same as above but make sure it works for exact names - containerList, err = d.Containers(&types.ContainerListOptions{ + containerList, err = d.Containers(context.Background(), &types.ContainerListOptions{ Filters: filters.NewArgs(filters.Arg("name", "b1")), }) assert.NilError(t, err) @@ -135,7 +136,7 @@ func TestNameFilter(t *testing.T) { assert.Assert(t, containerListContainsName(containerList, three.Name)) // Same as above but with slash prefix should produce the same result - containerListWithPrefix, err = d.Containers(&types.ContainerListOptions{ + containerListWithPrefix, err = d.Containers(context.Background(), &types.ContainerListOptions{ Filters: filters.NewArgs(filters.Arg("name", "/b1")), }) assert.NilError(t, err) diff --git a/daemon/monitor.go b/daemon/monitor.go index 9a087283c8a15..601d8561f22e6 100644 --- a/daemon/monitor.go +++ b/daemon/monitor.go @@ -104,7 +104,7 @@ func (daemon *Daemon) handleContainerExit(c *container.Container, e 
*libcontaine // But containerStart will use daemon.netController segment. // So to avoid panic at startup process, here must wait util daemon restore done. daemon.waitForStartupDone() - if err = daemon.containerStart(c, "", "", false); err != nil { + if err = daemon.containerStart(context.Background(), c, "", "", false); err != nil { logrus.Debugf("failed to restart container: %+v", err) } } diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go index 66ccdc5c6a3d3..01fed13295000 100644 --- a/daemon/oci_linux.go +++ b/daemon/oci_linux.go @@ -1007,7 +1007,7 @@ func WithUser(c *container.Container) coci.SpecOpts { } } -func (daemon *Daemon) createSpec(c *container.Container) (retSpec *specs.Spec, err error) { +func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (retSpec *specs.Spec, err error) { var ( opts []coci.SpecOpts s = oci.DefaultSpec() diff --git a/daemon/oci_linux_test.go b/daemon/oci_linux_test.go index 42084c900d03d..d6297b99739a5 100644 --- a/daemon/oci_linux_test.go +++ b/daemon/oci_linux_test.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "os" "path/filepath" "testing" @@ -74,7 +75,7 @@ func TestTmpfsDevShmNoDupMount(t *testing.T) { d := setupFakeDaemon(t, c) defer cleanupFakeContainer(c) - _, err := d.createSpec(c) + _, err := d.createSpec(context.TODO(), c) assert.Check(t, err) } @@ -93,7 +94,7 @@ func TestIpcPrivateVsReadonly(t *testing.T) { d := setupFakeDaemon(t, c) defer cleanupFakeContainer(c) - s, err := d.createSpec(c) + s, err := d.createSpec(context.TODO(), c) assert.Check(t, err) // Find the /dev/shm mount in ms, check it does not have ro @@ -123,7 +124,7 @@ func TestSysctlOverride(t *testing.T) { defer cleanupFakeContainer(c) // Ensure that the implicit sysctl is set correctly. 
- s, err := d.createSpec(c) + s, err := d.createSpec(context.TODO(), c) assert.NilError(t, err) assert.Equal(t, s.Hostname, "foobar") assert.Equal(t, s.Linux.Sysctl["kernel.domainname"], c.Config.Domainname) @@ -139,7 +140,7 @@ func TestSysctlOverride(t *testing.T) { assert.Assert(t, c.HostConfig.Sysctls["kernel.domainname"] != c.Config.Domainname) c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"] = "1024" - s, err = d.createSpec(c) + s, err = d.createSpec(context.TODO(), c) assert.NilError(t, err) assert.Equal(t, s.Hostname, "foobar") assert.Equal(t, s.Linux.Sysctl["kernel.domainname"], c.HostConfig.Sysctls["kernel.domainname"]) @@ -147,7 +148,7 @@ func TestSysctlOverride(t *testing.T) { // Ensure the ping_group_range is not set on a daemon with user-namespaces enabled d.configStore.RemappedRoot = "dummy:dummy" - s, err = d.createSpec(c) + s, err = d.createSpec(context.TODO(), c) assert.NilError(t, err) _, ok := s.Linux.Sysctl["net.ipv4.ping_group_range"] assert.Assert(t, !ok) @@ -155,7 +156,7 @@ func TestSysctlOverride(t *testing.T) { // Ensure the ping_group_range is set on a container in "host" userns mode // on a daemon with user-namespaces enabled c.HostConfig.UsernsMode = "host" - s, err = d.createSpec(c) + s, err = d.createSpec(context.TODO(), c) assert.NilError(t, err) assert.Equal(t, s.Linux.Sysctl["net.ipv4.ping_group_range"], "0 2147483647") } @@ -175,7 +176,7 @@ func TestSysctlOverrideHost(t *testing.T) { defer cleanupFakeContainer(c) // Ensure that the implicit sysctl is not set - s, err := d.createSpec(c) + s, err := d.createSpec(context.TODO(), c) assert.NilError(t, err) assert.Equal(t, s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], "") assert.Equal(t, s.Linux.Sysctl["net.ipv4.ping_group_range"], "") @@ -183,7 +184,7 @@ func TestSysctlOverrideHost(t *testing.T) { // Set an explicit sysctl. 
c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"] = "1024" - s, err = d.createSpec(c) + s, err = d.createSpec(context.TODO(), c) assert.NilError(t, err) assert.Equal(t, s.Linux.Sysctl["net.ipv4.ip_unprivileged_port_start"], c.HostConfig.Sysctls["net.ipv4.ip_unprivileged_port_start"]) } diff --git a/daemon/oci_windows.go b/daemon/oci_windows.go index 886be9ae2542d..432933965625a 100644 --- a/daemon/oci_windows.go +++ b/daemon/oci_windows.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "encoding/json" "fmt" "os" @@ -24,9 +25,9 @@ const ( credentialSpecFileLocation = "CredentialSpecs" ) -func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) { +func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (*specs.Spec, error) { - img, err := daemon.imageService.GetImage(string(c.ImageID), nil) + img, err := daemon.imageService.GetImage(ctx, string(c.ImageID), nil) if err != nil { return nil, err } diff --git a/daemon/restart.go b/daemon/restart.go index a6c8ddbb24377..33e1f89ce9a63 100644 --- a/daemon/restart.go +++ b/daemon/restart.go @@ -62,7 +62,7 @@ func (daemon *Daemon) containerRestart(ctx context.Context, container *container } } - if err := daemon.containerStart(container, "", "", true); err != nil { + if err := daemon.containerStart(ctx, container, "", "", true); err != nil { return err } diff --git a/daemon/start.go b/daemon/start.go index ecc0f8b8afccc..4ce58a7c00e76 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -14,7 +14,7 @@ import ( ) // ContainerStart starts a container. 
-func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error { +func (daemon *Daemon) ContainerStart(ctx context.Context, name string, hostConfig *containertypes.HostConfig, checkpoint string, checkpointDir string) error { if checkpoint != "" && !daemon.HasExperimental() { return errdefs.InvalidParameter(errors.New("checkpoint is only supported in experimental mode")) } @@ -91,14 +91,14 @@ func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.Hos return errdefs.InvalidParameter(err) } } - return daemon.containerStart(ctr, checkpoint, checkpointDir, true) + return daemon.containerStart(ctx, ctr, checkpoint, checkpointDir, true) } // containerStart prepares the container to run by setting up everything the // container needs, such as storage and networking, as well as links // between containers. The container is left waiting for a signal to // begin running. -func (daemon *Daemon) containerStart(container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) { +func (daemon *Daemon) containerStart(ctx context.Context, container *container.Container, checkpoint string, checkpointDir string, resetRestartManager bool) (err error) { start := time.Now() container.Lock() defer container.Unlock() @@ -150,7 +150,7 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint return err } - spec, err := daemon.createSpec(container) + spec, err := daemon.createSpec(ctx, container) if err != nil { return errdefs.System(err) } @@ -176,15 +176,13 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint return err } - ctx := context.TODO() - err = daemon.containerd.Create(ctx, container.ID, spec, shim, createOptions) if err != nil { if errdefs.IsConflict(err) { logrus.WithError(err).WithField("container", container.ID).Error("Container not cleaned up from containerd from previous run") // 
best effort to clean up old container object - daemon.containerd.DeleteTask(ctx, container.ID) - if err := daemon.containerd.Delete(ctx, container.ID); err != nil && !errdefs.IsNotFound(err) { + daemon.containerd.DeleteTask(context.Background(), container.ID) + if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil && !errdefs.IsNotFound(err) { logrus.WithError(err).WithField("container", container.ID).Error("Error cleaning up stale containerd container object") } err = daemon.containerd.Create(ctx, container.ID, spec, shim, createOptions) @@ -195,11 +193,11 @@ func (daemon *Daemon) containerStart(container *container.Container, checkpoint } // TODO(mlaventure): we need to specify checkpoint options here - pid, err := daemon.containerd.Start(context.Background(), container.ID, checkpointDir, + pid, err := daemon.containerd.Start(ctx, container.ID, checkpointDir, container.StreamConfig.Stdin() != nil || container.Config.Tty, container.InitializeStdio) if err != nil { - if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil { + if err := daemon.containerd.Delete(ctx, container.ID); err != nil { logrus.WithError(err).WithField("container", container.ID). 
Error("failed to delete failed start container") } From d33ed1682b90066c4732fdb9e9f9bef34a9dfed4 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Fri, 8 Jul 2022 15:28:14 +0200 Subject: [PATCH 03/90] produce progress events polling ctrd's content.Store Signed-off-by: Nicolas De Loof --- daemon/containerd/image_pull.go | 34 +++++++- daemon/containerd/progress.go | 150 ++++++++++++++++++++++++++++++++ 2 files changed, 183 insertions(+), 1 deletion(-) create mode 100644 daemon/containerd/progress.go diff --git a/daemon/containerd/image_pull.go b/daemon/containerd/image_pull.go index 4a2b86ca8f40e..76adf3d23c50b 100644 --- a/daemon/containerd/image_pull.go +++ b/daemon/containerd/image_pull.go @@ -5,6 +5,7 @@ import ( "io" "github.com/containerd/containerd" + "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/docker/distribution" "github.com/docker/distribution/reference" @@ -44,7 +45,38 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, resolver := newResolverFromAuthConfig(authConfig) opts = append(opts, containerd.WithResolver(resolver)) - _, err = i.client.Pull(ctx, ref.String(), opts...) + jobs := newJobs() + h := images.HandlerFunc(func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) { + if desc.MediaType != images.MediaTypeDockerSchema1Manifest { + jobs.Add(desc) + } + return nil, nil + }) + opts = append(opts, containerd.WithImageHandler(h)) + + stop := make(chan struct{}) + go func() { + showProgress(ctx, jobs, i.client.ContentStore(), outStream, stop) + stop <- struct{}{} + }() + + img, err := i.client.Pull(ctx, ref.String(), opts...) 
+ if err != nil { + return err + } + + unpacked, err := img.IsUnpacked(ctx, containerd.DefaultSnapshotter) + if err != nil { + return err + } + + if !unpacked { + if err := img.Unpack(ctx, containerd.DefaultSnapshotter); err != nil { + return err + } + } + stop <- struct{}{} + <-stop return err } diff --git a/daemon/containerd/progress.go b/daemon/containerd/progress.go new file mode 100644 index 0000000000000..31367a2f64bc0 --- /dev/null +++ b/daemon/containerd/progress.go @@ -0,0 +1,150 @@ +package containerd + +import ( + "context" + "io" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" + "github.com/docker/docker/pkg/stringid" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" +) + +func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, w io.Writer, stop chan struct{}) { + var ( + out = streamformatter.NewJSONProgressOutput(w, false) + ticker = time.NewTicker(100 * time.Millisecond) + start = time.Now() + done bool + ) + defer ticker.Stop() + +outer: + for { + select { + case <-ticker.C: + if !ongoing.IsResolved() { + continue + } + + pulling := map[string]content.Status{} + if !done { + actives, err := cs.ListStatuses(ctx, "") + if err != nil { + log.G(ctx).WithError(err).Error("status check failed") + continue + } + // update status of status entries! 
+ for _, status := range actives { + pulling[status.Ref] = status + } + } + + // update inactive jobs + for _, j := range ongoing.Jobs() { + key := remotes.MakeRefKey(ctx, j) + if info, ok := pulling[key]; ok { + out.WriteProgress(progress.Progress{ + ID: stringid.TruncateID(j.Digest.Encoded()), + Action: "Downloading", + Current: info.Offset, + Total: info.Total, + }) + continue + } + + info, err := cs.Info(ctx, j.Digest) + if err != nil { + if !errdefs.IsNotFound(err) { + log.G(ctx).WithError(err).Error("failed to get content info") + continue outer + } + } else if info.CreatedAt.After(start) { + out.WriteProgress(progress.Progress{ + ID: stringid.TruncateID(j.Digest.Encoded()), + Action: "Download complete", + HideCounts: true, + LastUpdate: true, + }) + ongoing.Remove(j) + } else { + out.WriteProgress(progress.Progress{ + ID: stringid.TruncateID(j.Digest.Encoded()), + Action: "Exists", + HideCounts: true, + LastUpdate: true, + }) + ongoing.Remove(j) + } + } + if done { + return + } + case <-stop: + done = true // allow ui to update once more + case <-ctx.Done(): + return + } + } +} + +// jobs holds a list of layers being downloaded to pull reference set by name +type jobs struct { + name string + resolved bool // resolved is set to true once remote image metadata has been downloaded from registry + descs map[digest.Digest]v1.Descriptor + mu sync.Mutex +} + +// newJobs creates a new instance of the job status tracker +func newJobs() *jobs { + return &jobs{ + descs: map[digest.Digest]v1.Descriptor{}, + } +} + +// IsResolved checks whether a descriptor has been resolved +func (j *jobs) IsResolved() bool { + j.mu.Lock() + defer j.mu.Unlock() + return j.resolved +} + +// Add adds a descriptor to be tracked +func (j *jobs) Add(desc v1.Descriptor) { + j.mu.Lock() + defer j.mu.Unlock() + + if _, ok := j.descs[desc.Digest]; ok { + return + } + j.descs[desc.Digest] = desc + j.resolved = true +} + +// Remove removes a descriptor +func (j *jobs) Remove(desc v1.Descriptor) 
{ + j.mu.Lock() + defer j.mu.Unlock() + + delete(j.descs, desc.Digest) +} + +// Jobs returns a list of all tracked descriptors +func (j *jobs) Jobs() []v1.Descriptor { + j.mu.Lock() + defer j.mu.Unlock() + + descs := make([]v1.Descriptor, 0, len(j.descs)) + for _, d := range j.descs { + descs = append(descs, d) + } + return descs +} From b5b5ae608a2dc6f3637957dde4915f0a25f2c614 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Fri, 8 Jul 2022 14:26:17 +0200 Subject: [PATCH 04/90] Implement ImageDelete for containerd Signed-off-by: Djordje Lukic --- daemon/images/image_delete.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index 5c543b3e94741..2d3cabe264f50 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -63,7 +63,7 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, start := time.Now() records := []types.ImageDeleteResponseItem{} - img, err := i.GetImage(nil, imageRef, nil) + img, err := i.GetImage(ctx, imageRef, nil) if err != nil { return nil, err } From 0fc860613d36aaeee81c258f7737484b907ae647 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Mon, 11 Jul 2022 10:58:39 +0200 Subject: [PATCH 05/90] add support for image tag Signed-off-by: Nicolas De Loof --- api/server/backend/build/backend.go | 4 +-- api/server/backend/build/tag.go | 5 +-- api/server/router/image/backend.go | 2 +- api/server/router/image/image_routes.go | 2 +- daemon/commit.go | 2 +- daemon/containerd/image_tag.go | 43 ++++++++++++++++++++++--- daemon/image_service.go | 4 +-- daemon/images/image_import.go | 2 +- daemon/images/image_tag.go | 8 +++-- 9 files changed, 55 insertions(+), 17 deletions(-) diff --git a/api/server/backend/build/backend.go b/api/server/backend/build/backend.go index 0d81c0138a841..a68c5b4e31d19 100644 --- a/api/server/backend/build/backend.go +++ b/api/server/backend/build/backend.go @@ -21,7 +21,7 @@ import ( // 
ImageComponent provides an interface for working with images type ImageComponent interface { SquashImage(from string, to string) (string, error) - TagImageWithReference(image.ID, reference.Named) error + TagImageWithReference(context.Context, image.ID, reference.Named) error } // Builder defines interface for running a build @@ -93,7 +93,7 @@ func (b *Backend) Build(ctx context.Context, config backend.BuildConfig) (string fmt.Fprintf(stdout, "Successfully built %s\n", stringid.TruncateID(imageID)) } if imageID != "" { - err = tagger.TagImages(image.ID(imageID)) + err = tagger.TagImages(ctx, image.ID(imageID)) } return imageID, err } diff --git a/api/server/backend/build/tag.go b/api/server/backend/build/tag.go index f840b9d726076..48ae953fbd36a 100644 --- a/api/server/backend/build/tag.go +++ b/api/server/backend/build/tag.go @@ -1,6 +1,7 @@ package build // import "github.com/docker/docker/api/server/backend/build" import ( + "context" "fmt" "io" @@ -31,9 +32,9 @@ func NewTagger(backend ImageComponent, stdout io.Writer, names []string) (*Tagge } // TagImages creates image tags for the imageID -func (bt *Tagger) TagImages(imageID image.ID) error { +func (bt *Tagger) TagImages(ctx context.Context, imageID image.ID) error { for _, rt := range bt.repoAndTags { - if err := bt.imageComponent.TagImageWithReference(imageID, rt); err != nil { + if err := bt.imageComponent.TagImageWithReference(ctx, imageID, rt); err != nil { return err } fmt.Fprintf(bt.stdout, "Successfully tagged %s\n", reference.FamiliarString(rt)) diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index 8921e114b3e41..950422e0e46af 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -25,7 +25,7 @@ type imageBackend interface { ImageHistory(imageName string) ([]*image.HistoryResponseItem, error) Images(ctx context.Context, opts types.ImageListOptions) ([]*types.ImageSummary, error) GetImage(ctx context.Context, refOrID string, 
platform *specs.Platform) (*dockerimage.Image, error) - TagImage(imageName, repository, tag string) (string, error) + TagImage(ctx context.Context, imageName, repository, tag string) (string, error) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) } diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index 3e06be9ce358c..6843d8a0f67f1 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -336,7 +336,7 @@ func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, if err := httputils.ParseForm(r); err != nil { return err } - if _, err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { + if _, err := s.backend.TagImage(ctx, vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { return err } w.WriteHeader(http.StatusCreated) diff --git a/daemon/commit.go b/daemon/commit.go index 7b7d4e4446391..fbb02469a33e4 100644 --- a/daemon/commit.go +++ b/daemon/commit.go @@ -171,7 +171,7 @@ func (daemon *Daemon) CreateImageFromContainer(ctx context.Context, name string, var imageRef string if c.Repo != "" { - imageRef, err = daemon.imageService.TagImage(string(id), c.Repo, c.Tag) + imageRef, err = daemon.imageService.TagImage(ctx, string(id), c.Repo, c.Tag) if err != nil { return "", err } diff --git a/daemon/containerd/image_tag.go b/daemon/containerd/image_tag.go index 96c9ce77891f5..b0d01347136fe 100644 --- a/daemon/containerd/image_tag.go +++ b/daemon/containerd/image_tag.go @@ -1,17 +1,52 @@ package containerd import ( + "context" + + containerdimages "github.com/containerd/containerd/images" "github.com/docker/distribution/reference" "github.com/docker/docker/image" + "github.com/sirupsen/logrus" ) // TagImage creates the tag specified by newTag, pointing to the image named // imageName (alternatively, imageName can also be an image ID). 
-func (i *ImageService) TagImage(imageName, repository, tag string) (string, error) { - panic("not implemented") +func (i *ImageService) TagImage(ctx context.Context, imageName, repository, tag string) (string, error) { + desc, err := i.ResolveImage(ctx, imageName) + if err != nil { + return "", err + } + + newTag, err := reference.ParseNormalizedNamed(repository) + if err != nil { + return "", err + } + if tag != "" { + if newTag, err = reference.WithTag(reference.TrimNamed(newTag), tag); err != nil { + return "", err + } + } + + err = i.TagImageWithReference(ctx, image.ID(desc.Digest), newTag) + return reference.FamiliarString(newTag), err } // TagImageWithReference adds the given reference to the image ID provided. -func (i *ImageService) TagImageWithReference(imageID image.ID, newTag reference.Named) error { - panic("not implemented") +func (i *ImageService) TagImageWithReference(ctx context.Context, imageID image.ID, newTag reference.Named) error { + logrus.Infof("Tagging image %q with reference %q", imageID, newTag.String()) + + desc, err := i.ResolveImage(ctx, imageID.String()) + if err != nil { + return err + } + + img := containerdimages.Image{ + Name: newTag.String(), + Target: desc, + } + + is := i.client.ImageService() + _, err = is.Create(ctx, img) + + return err } diff --git a/daemon/image_service.go b/daemon/image_service.go index e301891cd1a70..1013e4faef315 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -38,8 +38,8 @@ type ImageService interface { ImageDiskUsage(ctx context.Context) ([]*types.ImageSummary, error) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) ImportImage(ctx context.Context, src string, repository string, platform *v1.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error - TagImage(imageName, repository, tag string) (string, error) - TagImageWithReference(imageID image.ID, newTag reference.Named) error + 
TagImage(ctx context.Context, imageName, repository, tag string) (string, error) + TagImageWithReference(ctx context.Context, imageID image.ID, newTag reference.Named) error GetImage(ctx context.Context, refOrID string, platform *v1.Platform) (*image.Image, error) ImageHistory(name string) ([]*imagetype.HistoryResponseItem, error) CommitImage(c backend.CommitConfig) (image.ID, error) diff --git a/daemon/images/image_import.go b/daemon/images/image_import.go index e0d246b52ee91..8f4fa9c2e07c2 100644 --- a/daemon/images/image_import.go +++ b/daemon/images/image_import.go @@ -134,7 +134,7 @@ func (i *ImageService) ImportImage(ctx context.Context, src string, repository s // FIXME: connect with commit code and call refstore directly if newRef != nil { - if err := i.TagImageWithReference(id, newRef); err != nil { + if err := i.TagImageWithReference(nil, id, newRef); err != nil { return err } } diff --git a/daemon/images/image_tag.go b/daemon/images/image_tag.go index 7ffe481e7d4b0..708b2689398f7 100644 --- a/daemon/images/image_tag.go +++ b/daemon/images/image_tag.go @@ -1,13 +1,15 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" + "github.com/docker/distribution/reference" "github.com/docker/docker/image" ) // TagImage creates the tag specified by newTag, pointing to the image named // imageName (alternatively, imageName can also be an image ID). 
-func (i *ImageService) TagImage(imageName, repository, tag string) (string, error) { +func (i *ImageService) TagImage(ctx context.Context, imageName, repository, tag string) (string, error) { img, err := i.GetImage(nil, imageName, nil) if err != nil { return "", err @@ -23,12 +25,12 @@ func (i *ImageService) TagImage(imageName, repository, tag string) (string, erro } } - err = i.TagImageWithReference(img.ID(), newTag) + err = i.TagImageWithReference(nil, img.ID(), newTag) return reference.FamiliarString(newTag), err } // TagImageWithReference adds the given reference to the image ID provided. -func (i *ImageService) TagImageWithReference(imageID image.ID, newTag reference.Named) error { +func (i *ImageService) TagImageWithReference(ctx context.Context, imageID image.ID, newTag reference.Named) error { if err := i.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil { return err } From da50abf09278c66ab80e7bdc643a1c311a90d4b7 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Mon, 11 Jul 2022 11:46:07 +0200 Subject: [PATCH 06/90] add image load/save support Signed-off-by: Nicolas De Loof --- api/server/router/image/backend.go | 4 +-- api/server/router/image/image_routes.go | 4 +-- daemon/containerd/image_exporter.go | 33 +++++++++++++++++++++---- daemon/image_service.go | 4 +-- daemon/images/image_exporter.go | 5 ++-- 5 files changed, 37 insertions(+), 13 deletions(-) diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index 950422e0e46af..5253c8b71d7d1 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -30,9 +30,9 @@ type imageBackend interface { } type importExportBackend interface { - LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error ImportImage(ctx context.Context, src string, repository string, platform *specs.Platform, tag string, msg string, inConfig 
io.ReadCloser, outStream io.Writer, changes []string) error - ExportImage(names []string, outStream io.Writer) error + ExportImage(ctx context.Context, names []string, outStream io.Writer) error } type registryBackend interface { diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index 6843d8a0f67f1..0e3cfcddce80b 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -137,7 +137,7 @@ func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r names = r.Form["names"] } - if err := s.backend.ExportImage(names, output); err != nil { + if err := s.backend.ExportImage(ctx, names, output); err != nil { if !output.Flushed() { return err } @@ -156,7 +156,7 @@ func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, output := ioutils.NewWriteFlusher(w) defer output.Close() - if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { + if err := s.backend.LoadImage(ctx, r.Body, output, quiet); err != nil { _, _ = output.Write(streamformatter.FormatError(err)) } return nil diff --git a/daemon/containerd/image_exporter.go b/daemon/containerd/image_exporter.go index 6d7c81d64ab10..342a1385d31c6 100644 --- a/daemon/containerd/image_exporter.go +++ b/daemon/containerd/image_exporter.go @@ -1,19 +1,42 @@ package containerd -import "io" +import ( + "context" + "io" + + "github.com/containerd/containerd" + "github.com/containerd/containerd/images/archive" + "github.com/containerd/containerd/platforms" + "github.com/docker/distribution/reference" +) // ExportImage exports a list of images to the given output stream. The // exported images are archived into a tar when written to the output // stream. All images with the given tag and all versions containing // the same tag are exported. names is the set of tags to export, and // outStream is the writer which the images are written to. 
-func (i *ImageService) ExportImage(names []string, outStream io.Writer) error { - panic("not implemented") +func (i *ImageService) ExportImage(ctx context.Context, names []string, outStream io.Writer) error { + opts := []archive.ExportOpt{ + archive.WithPlatform(platforms.Ordered(platforms.DefaultSpec())), + archive.WithSkipNonDistributableBlobs(), + } + is := i.client.ImageService() + for _, imageRef := range names { + named, err := reference.ParseDockerRef(imageRef) + if err != nil { + return err + } + opts = append(opts, archive.WithImage(is, named.String())) + } + return i.client.Export(ctx, outStream, opts...) } // LoadImage uploads a set of images into the repository. This is the // complement of ExportImage. The input stream is an uncompressed tar // ball containing images and metadata. -func (i *ImageService) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - panic("not implemented") +func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error { + _, err := i.client.Import(ctx, inTar, + containerd.WithImportPlatform(platforms.DefaultStrict()), + ) + return err } diff --git a/daemon/image_service.go b/daemon/image_service.go index 1013e4faef315..e56b31cd4ac2b 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -29,8 +29,8 @@ type ImageService interface { PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error CreateImage(config []byte, parent string) (builder.Image, error) ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) - ExportImage(names []string, outStream io.Writer) error - LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error + ExportImage(ctx context.Context, names []string, outStream io.Writer) error + LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error 
Images(ctx context.Context, opts types.ImageListOptions) ([]*types.ImageSummary, error) LogImageEvent(imageID, refName, action string) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) diff --git a/daemon/images/image_exporter.go b/daemon/images/image_exporter.go index 037a694b45597..2ab4af1a83285 100644 --- a/daemon/images/image_exporter.go +++ b/daemon/images/image_exporter.go @@ -1,6 +1,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" "io" "github.com/docker/docker/image/tarexport" @@ -11,7 +12,7 @@ import ( // stream. All images with the given tag and all versions containing // the same tag are exported. names is the set of tags to export, and // outStream is the writer which the images are written to. -func (i *ImageService) ExportImage(names []string, outStream io.Writer) error { +func (i *ImageService) ExportImage(ctx context.Context, names []string, outStream io.Writer) error { imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStore, i.referenceStore, i) return imageExporter.Save(names, outStream) } @@ -19,7 +20,7 @@ func (i *ImageService) ExportImage(names []string, outStream io.Writer) error { // LoadImage uploads a set of images into the repository. This is the // complement of ExportImage. The input stream is an uncompressed tar // ball containing images and metadata. 
-func (i *ImageService) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { +func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error { imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStore, i.referenceStore, i) return imageExporter.Load(inTar, outStream, quiet) } From af2911a8767fc5a7c7a9793aa26d17c6968d2a68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Thu, 7 Jul 2022 13:54:50 +0200 Subject: [PATCH 07/90] containerd: Unpack loaded images MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/image_exporter.go | 32 +++++++++++++++++++++++++---- 1 file changed, 28 insertions(+), 4 deletions(-) diff --git a/daemon/containerd/image_exporter.go b/daemon/containerd/image_exporter.go index 342a1385d31c6..3f99718f1017f 100644 --- a/daemon/containerd/image_exporter.go +++ b/daemon/containerd/image_exporter.go @@ -8,6 +8,8 @@ import ( "github.com/containerd/containerd/images/archive" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // ExportImage exports a list of images to the given output stream. The @@ -35,8 +37,30 @@ func (i *ImageService) ExportImage(ctx context.Context, names []string, outStrea // complement of ExportImage. The input stream is an uncompressed tar // ball containing images and metadata. 
func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - _, err := i.client.Import(ctx, inTar, - containerd.WithImportPlatform(platforms.DefaultStrict()), - ) - return err + platform := platforms.DefaultStrict() + imgs, err := i.client.Import(ctx, inTar, containerd.WithImportPlatform(platform)) + + if err != nil { + logrus.WithError(err).Error("Failed to import image to containerd") + return errors.Wrapf(err, "Failed to import image") + } + + for _, img := range imgs { + platformImg := containerd.NewImageWithPlatform(i.client, img, platform) + + unpacked, err := platformImg.IsUnpacked(ctx, containerd.DefaultSnapshotter) + if err != nil { + logrus.WithError(err).WithField("image", img.Name).Error("IsUnpacked failed") + continue + } + + if !unpacked { + err := platformImg.Unpack(ctx, containerd.DefaultSnapshotter) + if err != nil { + logrus.WithError(err).WithField("image", img.Name).Error("Failed to unpack image") + return errors.Wrapf(err, "Failed to unpack image") + } + } + } + return nil } From dea914032f1386e00d5e7b8fa255811a1f228fae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Fri, 8 Jul 2022 15:12:35 +0200 Subject: [PATCH 08/90] containerd: Implement push MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/image_push.go | 63 ++++++++++++++++++++++++++++++++- 1 file changed, 62 insertions(+), 1 deletion(-) diff --git a/daemon/containerd/image_push.go b/daemon/containerd/image_push.go index 73390f0406dbd..a02bec8d272a1 100644 --- a/daemon/containerd/image_push.go +++ b/daemon/containerd/image_push.go @@ -4,10 +4,71 @@ import ( "context" "io" + "github.com/containerd/containerd" + containerdimages "github.com/containerd/containerd/images" + "github.com/containerd/containerd/images/converter" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/remotes" + 
"github.com/docker/distribution/reference" "github.com/docker/docker/api/types/registry" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) // PushImage initiates a push operation on the repository named localName. func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error { - panic("not implemented") + // TODO: Pass this from user? + platformMatcher := platforms.DefaultStrict() + + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return err + } + if tag != "" { + // Push by digest is not supported, so only tags are supported. + ref, err = reference.WithTag(ref, tag) + if err != nil { + return err + } + } + + is := i.client.ImageService() + + img, err := is.Get(ctx, ref.String()) + if err != nil { + return errors.Wrap(err, "Failed to get image") + } + + target := img.Target + + // Create a temporary image which is stripped from content that references other platforms. + // We or the remote may not have them and referencing them will end with an error. 
+ if platformMatcher != platforms.All { + tmpRef := ref.String() + "-tmp-platformspecific" + platformImg, err := converter.Convert(ctx, i.client, tmpRef, ref.String(), converter.WithPlatform(platformMatcher)) + if err != nil { + return errors.Wrap(err, "Failed to convert image to platform specific") + } + + target = platformImg.Target + defer i.client.ImageService().Delete(ctx, platformImg.Name, containerdimages.SynchronousDelete()) + } + + imageHandler := containerdimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + logrus.WithField("desc", desc).Debug("Pushing") + return nil, nil + }) + imageHandler = remotes.SkipNonDistributableBlobs(imageHandler) + + resolver := newResolverFromAuthConfig(authConfig) + + logrus.WithField("desc", target).WithField("ref", ref.String()).Info("Pushing desc to remote ref") + err = i.client.Push(ctx, ref.String(), target, + containerd.WithResolver(resolver), + containerd.WithPlatformMatcher(platformMatcher), + containerd.WithImageHandler(imageHandler), + ) + + return err } From 8c90f8868fc8472af42f40235f87977724f18248 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Mon, 11 Jul 2022 17:42:13 +0200 Subject: [PATCH 09/90] compute image's shared size Signed-off-by: Nicolas De Loof --- daemon/containerd/image_list.go | 78 ++++++++++++++++++++++++++++----- 1 file changed, 66 insertions(+), 12 deletions(-) diff --git a/daemon/containerd/image_list.go b/daemon/containerd/image_list.go index 18b3c10ef1f27..05c040a3c7caf 100644 --- a/daemon/containerd/image_list.go +++ b/daemon/containerd/image_list.go @@ -4,10 +4,10 @@ import ( "context" "github.com/containerd/containerd" - "github.com/containerd/containerd/snapshots" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ) @@ -38,19 +38,52 @@ func (i *ImageService) 
Images(ctx context.Context, opts types.ImageListOptions) } snapshotter := i.client.SnapshotService(containerd.DefaultSnapshotter) + sizeCache := make(map[digest.Digest]int64) + snapshotSizeFn := func(d digest.Digest) (int64, error) { + if s, ok := sizeCache[d]; ok { + return s, nil + } + usage, err := snapshotter.Usage(ctx, d.String()) + if err != nil { + return 0, err + } + sizeCache[d] = usage.Size + return usage.Size, nil + } - var ret []*types.ImageSummary - for _, img := range imgs { + ret := make([]*types.ImageSummary, 0, len(imgs)) + var ( + root []*[]digest.Digest + layers map[digest.Digest]int + ) + if opts.SharedSize { + root = make([]*[]digest.Digest, len(imgs)) + layers = make(map[digest.Digest]int) + } + + for n, img := range imgs { if !filter(img) { continue } + diffIDs, err := img.RootFS(ctx) + if err != nil { + return nil, err + } + chainIDs := identity.ChainIDs(diffIDs) + if opts.SharedSize { + root[n] = &chainIDs + for _, id := range chainIDs { + layers[id] = layers[id] + 1 + } + } + size, err := img.Size(ctx) if err != nil { return nil, err } - virtualSize, err := computeVirtualSize(ctx, img, snapshotter) + virtualSize, err := computeVirtualSize(chainIDs, snapshotSizeFn) if err != nil { return nil, err } @@ -68,6 +101,16 @@ func (i *ImageService) Images(ctx context.Context, opts types.ImageListOptions) }) } + if opts.SharedSize { + for i, chainIDs := range root { + sharedSize, err := computeSharedSize(*chainIDs, layers, snapshotSizeFn) + if err != nil { + return nil, err + } + ret[i].SharedSize = sharedSize + } + } + return ret, nil } @@ -131,18 +174,29 @@ func (i *ImageService) setupFilters(ctx context.Context, imageFilters filters.Ar }, nil } -func computeVirtualSize(ctx context.Context, image containerd.Image, snapshotter snapshots.Snapshotter) (int64, error) { +func computeVirtualSize(chainIDs []digest.Digest, sizeFn func(d digest.Digest) (int64, error)) (int64, error) { var virtualSize int64 - diffIDs, err := image.RootFS(ctx) - if err != 
nil { - return virtualSize, err - } - for _, chainID := range identity.ChainIDs(diffIDs) { - usage, err := snapshotter.Usage(ctx, chainID.String()) + for _, chainID := range chainIDs { + size, err := sizeFn(chainID) if err != nil { return virtualSize, err } - virtualSize += usage.Size + virtualSize += size } return virtualSize, nil } + +func computeSharedSize(chainIDs []digest.Digest, layers map[digest.Digest]int, sizeFn func(d digest.Digest) (int64, error)) (int64, error) { + var sharedSize int64 + for _, chainID := range chainIDs { + if layers[chainID] == 1 { + continue + } + size, err := sizeFn(chainID) + if err != nil { + return 0, err + } + sharedSize += size + } + return sharedSize, nil +} From 215028fdc206aa68468dd7e1331cdcf6ae82bf1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Wed, 6 Jul 2022 11:42:37 +0200 Subject: [PATCH 10/90] containerd: Push progress MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/image_pull.go | 11 +- daemon/containerd/image_push.go | 19 +++- daemon/containerd/progress.go | 195 +++++++++++++++++++++----------- daemon/containerd/resolver.go | 10 +- 4 files changed, 157 insertions(+), 78 deletions(-) diff --git a/daemon/containerd/image_pull.go b/daemon/containerd/image_pull.go index 76adf3d23c50b..bcc6e9ff7e48b 100644 --- a/daemon/containerd/image_pull.go +++ b/daemon/containerd/image_pull.go @@ -42,7 +42,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, } } - resolver := newResolverFromAuthConfig(authConfig) + resolver, _ := newResolverFromAuthConfig(authConfig) opts = append(opts, containerd.WithResolver(resolver)) jobs := newJobs() @@ -54,11 +54,8 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, }) opts = append(opts, containerd.WithImageHandler(h)) - stop := make(chan struct{}) - go func() { - showProgress(ctx, jobs, i.client.ContentStore(), 
outStream, stop) - stop <- struct{}{} - }() + finishProgress := showProgress(ctx, jobs, outStream, pullProgress(i.client.ContentStore())) + defer finishProgress() img, err := i.client.Pull(ctx, ref.String(), opts...) if err != nil { @@ -75,8 +72,6 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, return err } } - stop <- struct{}{} - <-stop return err } diff --git a/daemon/containerd/image_push.go b/daemon/containerd/image_push.go index a02bec8d272a1..2a1b3451871ce 100644 --- a/daemon/containerd/image_push.go +++ b/daemon/containerd/image_push.go @@ -34,6 +34,7 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea } is := i.client.ImageService() + store := i.client.ContentStore() img, err := is.Get(ctx, ref.String()) if err != nil { @@ -55,13 +56,29 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea defer i.client.ImageService().Delete(ctx, platformImg.Name, containerdimages.SynchronousDelete()) } + jobs := newJobs() + imageHandler := containerdimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { logrus.WithField("desc", desc).Debug("Pushing") + if desc.MediaType != containerdimages.MediaTypeDockerSchema1Manifest { + children, err := containerdimages.Children(ctx, store, desc) + + if err == nil { + for _, c := range children { + jobs.Add(c) + } + } + + jobs.Add(desc) + } return nil, nil }) imageHandler = remotes.SkipNonDistributableBlobs(imageHandler) - resolver := newResolverFromAuthConfig(authConfig) + resolver, tracker := newResolverFromAuthConfig(authConfig) + + finishProgress := showProgress(ctx, jobs, outStream, pushProgress(tracker)) + defer finishProgress() logrus.WithField("desc", target).WithField("ref", ref.String()).Info("Pushing desc to remote ref") err = i.client.Push(ctx, ref.String(), target, diff --git a/daemon/containerd/progress.go b/daemon/containerd/progress.go index 
31367a2f64bc0..b71f0971fca55 100644 --- a/daemon/containerd/progress.go +++ b/daemon/containerd/progress.go @@ -7,106 +7,169 @@ import ( "time" "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" + cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/log" "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" "github.com/docker/docker/pkg/progress" "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/stringid" "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go/v1" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" ) -func showProgress(ctx context.Context, ongoing *jobs, cs content.Store, w io.Writer, stop chan struct{}) { +type updateProgressFunc func(ctx context.Context, ongoing *jobs, output progress.Output, start time.Time) error + +func showProgress(ctx context.Context, ongoing *jobs, w io.Writer, updateFunc updateProgressFunc) func() { + stop := make(chan struct{}) + ctx, cancelProgress := context.WithCancel(ctx) + var ( out = streamformatter.NewJSONProgressOutput(w, false) ticker = time.NewTicker(100 * time.Millisecond) start = time.Now() done bool ) - defer ticker.Stop() -outer: - for { - select { - case <-ticker.C: - if !ongoing.IsResolved() { - continue - } + for _, j := range ongoing.Jobs() { + id := stringid.TruncateID(j.Digest.Encoded()) + progress.Update(out, id, "Preparing") + } - pulling := map[string]content.Status{} - if !done { - actives, err := cs.ListStatuses(ctx, "") - if err != nil { - log.G(ctx).WithError(err).Error("status check failed") + go func() { + defer func() { + ticker.Stop() + stop <- struct{}{} + }() + + for { + select { + case <-ticker.C: + if !ongoing.IsResolved() { continue } - // update status of status entries! 
- for _, status := range actives { - pulling[status.Ref] = status + err := updateFunc(ctx, ongoing, out, start) + if err != nil { + logrus.WithError(err).Error("Updating progress failed") + return } - } - // update inactive jobs - for _, j := range ongoing.Jobs() { - key := remotes.MakeRefKey(ctx, j) - if info, ok := pulling[key]; ok { - out.WriteProgress(progress.Progress{ - ID: stringid.TruncateID(j.Digest.Encoded()), - Action: "Downloading", - Current: info.Offset, - Total: info.Total, - }) - continue + if done { + return } + case <-ctx.Done(): + done = true + } + } + }() - info, err := cs.Info(ctx, j.Digest) - if err != nil { - if !errdefs.IsNotFound(err) { - log.G(ctx).WithError(err).Error("failed to get content info") - continue outer - } - } else if info.CreatedAt.After(start) { - out.WriteProgress(progress.Progress{ - ID: stringid.TruncateID(j.Digest.Encoded()), - Action: "Download complete", - HideCounts: true, - LastUpdate: true, - }) - ongoing.Remove(j) + return func() { + cancelProgress() + <-stop + } +} + +func pushProgress(tracker docker.StatusTracker) updateProgressFunc { + return func(ctx context.Context, ongoing *jobs, out progress.Output, start time.Time) error { + for _, j := range ongoing.Jobs() { + key := remotes.MakeRefKey(ctx, j) + id := stringid.TruncateID(j.Digest.Encoded()) + + status, err := tracker.GetStatus(key) + if err != nil { + if cerrdefs.IsNotFound(err) { + progress.Update(out, id, "Waiting") + continue } else { - out.WriteProgress(progress.Progress{ - ID: stringid.TruncateID(j.Digest.Encoded()), - Action: "Exists", - HideCounts: true, - LastUpdate: true, - }) - ongoing.Remove(j) + return err } + + } + + logrus.WithField("status", status).WithField("id", id).Debug("Status update") + + if status.Committed && status.Offset >= status.Total { + progress.Update(out, id, "Pushed") + ongoing.Remove(j) + continue + } + + out.WriteProgress(progress.Progress{ + ID: id, + Action: "Pushing", + Current: status.Offset, + Total: status.Total, + 
}) + } + + return nil + } +} + +func pullProgress(cs content.Store) updateProgressFunc { + return func(ctx context.Context, ongoing *jobs, out progress.Output, start time.Time) error { + pulling := map[string]content.Status{} + actives, err := cs.ListStatuses(ctx, "") + if err != nil { + log.G(ctx).WithError(err).Error("status check failed") + return nil + } + // update status of status entries! + for _, status := range actives { + pulling[status.Ref] = status + } + + for _, j := range ongoing.Jobs() { + key := remotes.MakeRefKey(ctx, j) + if info, ok := pulling[key]; ok { + out.WriteProgress(progress.Progress{ + ID: stringid.TruncateID(j.Digest.Encoded()), + Action: "Downloading", + Current: info.Offset, + Total: info.Total, + }) + continue } - if done { - return + + info, err := cs.Info(ctx, j.Digest) + if err != nil { + if !cerrdefs.IsNotFound(err) { + return err + } + } else if info.CreatedAt.After(start) { + out.WriteProgress(progress.Progress{ + ID: stringid.TruncateID(j.Digest.Encoded()), + Action: "Download complete", + HideCounts: true, + LastUpdate: true, + }) + ongoing.Remove(j) + } else { + out.WriteProgress(progress.Progress{ + ID: stringid.TruncateID(j.Digest.Encoded()), + Action: "Exists", + HideCounts: true, + LastUpdate: true, + }) + ongoing.Remove(j) } - case <-stop: - done = true // allow ui to update once more - case <-ctx.Done(): - return } + return nil } } -// jobs holds a list of layers being downloaded to pull reference set by name type jobs struct { name string - resolved bool // resolved is set to true once remote image metadata has been downloaded from registry - descs map[digest.Digest]v1.Descriptor + resolved bool // resolved is set to true once all jobs are added + descs map[digest.Digest]ocispec.Descriptor mu sync.Mutex } // newJobs creates a new instance of the job status tracker func newJobs() *jobs { return &jobs{ - descs: map[digest.Digest]v1.Descriptor{}, + descs: map[digest.Digest]ocispec.Descriptor{}, } } @@ -118,7 +181,7 @@ 
func (j *jobs) IsResolved() bool { } // Add adds a descriptor to be tracked -func (j *jobs) Add(desc v1.Descriptor) { +func (j *jobs) Add(desc ocispec.Descriptor) { j.mu.Lock() defer j.mu.Unlock() @@ -130,7 +193,7 @@ func (j *jobs) Add(desc v1.Descriptor) { } // Remove removes a descriptor -func (j *jobs) Remove(desc v1.Descriptor) { +func (j *jobs) Remove(desc ocispec.Descriptor) { j.mu.Lock() defer j.mu.Unlock() @@ -138,11 +201,11 @@ func (j *jobs) Remove(desc v1.Descriptor) { } // Jobs returns a list of all tracked descriptors -func (j *jobs) Jobs() []v1.Descriptor { +func (j *jobs) Jobs() []ocispec.Descriptor { j.mu.Lock() defer j.mu.Unlock() - descs := make([]v1.Descriptor, 0, len(j.descs)) + descs := make([]ocispec.Descriptor, 0, len(j.descs)) for _, d := range j.descs { descs = append(descs, d) } diff --git a/daemon/containerd/resolver.go b/daemon/containerd/resolver.go index b172d6c661553..89cb81b3ecf7d 100644 --- a/daemon/containerd/resolver.go +++ b/daemon/containerd/resolver.go @@ -6,8 +6,9 @@ import ( registrytypes "github.com/docker/docker/api/types/registry" ) -func newResolverFromAuthConfig(authConfig *registrytypes.AuthConfig) remotes.Resolver { +func newResolverFromAuthConfig(authConfig *registrytypes.AuthConfig) (remotes.Resolver, docker.StatusTracker) { opts := []docker.RegistryOpt{} + if authConfig != nil { authorizer := docker.NewDockerAuthorizer(docker.WithAuthCreds(func(_ string) (string, string, error) { if authConfig.IdentityToken != "" { @@ -19,7 +20,10 @@ func newResolverFromAuthConfig(authConfig *registrytypes.AuthConfig) remotes.Res opts = append(opts, docker.WithAuthorizer(authorizer)) } + tracker := docker.NewInMemoryTracker() + return docker.NewResolver(docker.ResolverOptions{ - Hosts: docker.ConfigureDefaultRegistries(opts...), - }) + Hosts: docker.ConfigureDefaultRegistries(opts...), + Tracker: tracker, + }), tracker } From 7d8b3ff61e44130702c5b95ebc351e77e8d64484 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= 
Date: Tue, 19 Jul 2022 13:26:03 +0200 Subject: [PATCH 11/90] daemon/containerd: Implement prune MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Initial version that doesn't handle any filters Signed-off-by: Paweł Gronowski --- daemon/containerd/image_prune.go | 85 +++++++++++++++++++++++++++++++- 1 file changed, 84 insertions(+), 1 deletion(-) diff --git a/daemon/containerd/image_prune.go b/daemon/containerd/image_prune.go index c32efa02c310a..2a9bf4fdda874 100644 --- a/daemon/containerd/image_prune.go +++ b/daemon/containerd/image_prune.go @@ -2,12 +2,95 @@ package containerd import ( "context" + "fmt" + "github.com/containerd/containerd/content" + cerrdefs "github.com/containerd/containerd/errdefs" + containerdimages "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) // ImagesPrune removes unused images +// TODO: handle pruneFilters func (i *ImageService) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) { - panic("not implemented") + is := i.client.ImageService() + store := i.client.ContentStore() + + images, err := is.List(ctx) + if err != nil { + return nil, errors.Wrapf(err, "Failed to list images") + } + + platform := platforms.DefaultStrict() + report := types.ImagesPruneReport{} + toDelete := map[digest.Digest]uint64{} + errs := []error{} + + for _, img := range images { + err := getContentDigestsWithSizes(ctx, img, store, platform, toDelete) + if err != nil { + errs = append(errs, err) + continue + } + } + + for digest, size := range toDelete { + report.SpaceReclaimed += size + report.ImagesDeleted = append(report.ImagesDeleted, + types.ImageDeleteResponseItem{ + Deleted: digest.String(), + }, + ) + } + + for _, img := range 
images { + err = is.Delete(ctx, img.Name, containerdimages.SynchronousDelete()) + if err != nil && !cerrdefs.IsNotFound(err) { + errs = append(errs, err) + continue + } + + report.ImagesDeleted = append(report.ImagesDeleted, + types.ImageDeleteResponseItem{ + Untagged: img.Name, + }, + ) + } + + if len(errs) > 0 { + return &report, combineErrors(errs) + } + + return &report, nil +} + +func getContentDigestsWithSizes(ctx context.Context, img containerdimages.Image, store content.Store, platform platforms.MatchComparer, toDelete map[digest.Digest]uint64) error { + return containerdimages.Walk(ctx, containerdimages.Handlers(containerdimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if desc.Size < 0 { + return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType) + } + toDelete[desc.Digest] = uint64(desc.Size) + return nil, nil + }), containerdimages.LimitManifests(containerdimages.FilterPlatforms(containerdimages.ChildrenHandler(store), platform), platform, 1)), img.Target) +} + +func combineErrors(errs []error) error { + if len(errs) == 1 { + return errs[0] + } + + errString := "" + for _, err := range errs { + if errString != "" { + errString += "\n" + } + errString += err.Error() + } + + return errors.Errorf("Multiple errors encountered:\n%s", errString) } From d2ecfa6c301c1898e244581707bad5e17aee7fa9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Thu, 21 Jul 2022 18:19:00 +0200 Subject: [PATCH 12/90] containerd/auth: Check if registry hostname matches MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/resolver.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/daemon/containerd/resolver.go b/daemon/containerd/resolver.go index 89cb81b3ecf7d..3a16ff2615868 100644 --- a/daemon/containerd/resolver.go +++ b/daemon/containerd/resolver.go 
@@ -4,13 +4,23 @@ import ( "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/registry" + "github.com/sirupsen/logrus" ) func newResolverFromAuthConfig(authConfig *registrytypes.AuthConfig) (remotes.Resolver, docker.StatusTracker) { opts := []docker.RegistryOpt{} if authConfig != nil { - authorizer := docker.NewDockerAuthorizer(docker.WithAuthCreds(func(_ string) (string, string, error) { + cfgHost := registry.ConvertToHostname(authConfig.ServerAddress) + if cfgHost == registry.IndexHostname { + cfgHost = registry.DefaultRegistryHost + } + authorizer := docker.NewDockerAuthorizer(docker.WithAuthCreds(func(host string) (string, string, error) { + if cfgHost != host { + logrus.WithField("host", host).WithField("cfgHost", cfgHost).Warn("Host doesn't match") + return "", "", nil + } if authConfig.IdentityToken != "" { return "", authConfig.IdentityToken, nil } From dde365f96e8ce9f3f1f9d3051e5ee33181272e45 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Tue, 12 Jul 2022 15:52:29 +0200 Subject: [PATCH 13/90] compute container's layer size Signed-off-by: Nicolas De Loof --- api/server/router/container/backend.go | 2 +- api/server/router/container/inspect.go | 2 +- daemon/cluster/executor/backend.go | 2 +- daemon/cluster/executor/container/adapter.go | 2 +- daemon/containerd/service.go | 44 +++++++++++++++++++- daemon/image_service.go | 2 +- daemon/images/image_unix.go | 8 ++-- daemon/images/image_windows.go | 6 ++- daemon/inspect.go | 14 ++++--- daemon/inspect_linux.go | 4 +- daemon/inspect_windows.go | 6 ++- daemon/list.go | 5 ++- daemon/prune.go | 7 +++- 13 files changed, 81 insertions(+), 23 deletions(-) diff --git a/api/server/router/container/backend.go b/api/server/router/container/backend.go index f0624eef836b3..eea44d8b7a32f 100644 --- a/api/server/router/container/backend.go +++ b/api/server/router/container/backend.go @@ -49,7 
+49,7 @@ type stateBackend interface { // monitorBackend includes functions to implement to provide containers monitoring functionality. type monitorBackend interface { ContainerChanges(name string) ([]archive.Change, error) - ContainerInspect(name string, size bool, version string) (interface{}, error) + ContainerInspect(ctx context.Context, name string, size bool, version string) (interface{}, error) ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error) ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error ContainerTop(name string, psArgs string) (*container.ContainerTopOKBody, error) diff --git a/api/server/router/container/inspect.go b/api/server/router/container/inspect.go index 5c78d15bc912d..c905c969b8906 100644 --- a/api/server/router/container/inspect.go +++ b/api/server/router/container/inspect.go @@ -12,7 +12,7 @@ func (s *containerRouter) getContainersByName(ctx context.Context, w http.Respon displaySize := httputils.BoolValue(r, "size") version := httputils.VersionFromContext(ctx) - json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) + json, err := s.backend.ContainerInspect(ctx, vars["name"], displaySize, version) if err != nil { return err } diff --git a/daemon/cluster/executor/backend.go b/daemon/cluster/executor/backend.go index 16ff7bf8dc717..101e1f8283a31 100644 --- a/daemon/cluster/executor/backend.go +++ b/daemon/cluster/executor/backend.go @@ -44,7 +44,7 @@ type Backend interface { ActivateContainerServiceBinding(containerName string) error DeactivateContainerServiceBinding(containerName string) error UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error - ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) + ContainerInspectCurrent(ctx context.Context, name string, size bool) (*types.ContainerJSON, error) ContainerWait(ctx 
context.Context, name string, condition containerpkg.WaitCondition) (<-chan containerpkg.StateStatus, error) ContainerRm(name string, config *types.ContainerRmConfig) error ContainerKill(name string, sig string) error diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go index bf0dabf08837f..29693820f3a5a 100644 --- a/daemon/cluster/executor/container/adapter.go +++ b/daemon/cluster/executor/container/adapter.go @@ -357,7 +357,7 @@ func (c *containerAdapter) start(ctx context.Context) error { } func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) { - cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false) + cs, err := c.backend.ContainerInspectCurrent(ctx, c.container.name(), false) if ctx.Err() != nil { return types.ContainerJSON{}, ctx.Err() } diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index a2bf22a981458..bd7df68fb4368 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -9,6 +9,8 @@ import ( "github.com/docker/docker/daemon/images" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" ) // ImageService implements daemon.ImageService @@ -116,6 +118,44 @@ func (i *ImageService) GetLayerFolders(img *image.Image, rwLayer layer.RWLayer) } // GetContainerLayerSize returns the real size & virtual size of the container. 
-func (i *ImageService) GetContainerLayerSize(containerID string) (int64, int64) { - panic("not implemented") +func (i *ImageService) GetContainerLayerSize(ctx context.Context, containerID string) (int64, int64, error) { + snapshotter := i.client.SnapshotService(containerd.DefaultSnapshotter) + sizeCache := make(map[digest.Digest]int64) + snapshotSizeFn := func(d digest.Digest) (int64, error) { + if s, ok := sizeCache[d]; ok { + return s, nil + } + usage, err := snapshotter.Usage(ctx, d.String()) + if err != nil { + return 0, err + } + sizeCache[d] = usage.Size + return usage.Size, nil + } + + c, err := i.client.ContainerService().Get(ctx, containerID) + if err != nil { + return 0, 0, err + } + image, err := i.client.GetImage(ctx, c.Image) + if err != nil { + return 0, 0, err + } + diffIDs, err := image.RootFS(ctx) + if err != nil { + return 0, 0, err + } + chainIDs := identity.ChainIDs(diffIDs) + + usage, err := snapshotter.Usage(ctx, containerID) + if err != nil { + return 0, 0, err + } + size := usage.Size + + virtualSize, err := computeVirtualSize(chainIDs, snapshotSizeFn) + if err != nil { + return 0, 0, err + } + return size, size + virtualSize, nil } diff --git a/daemon/image_service.go b/daemon/image_service.go index e56b31cd4ac2b..84baa23a44d90 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -54,7 +54,7 @@ type ImageService interface { GetLayerMountID(cid string) (string, error) ReleaseLayer(rwlayer layer.RWLayer) error LayerDiskUsage(ctx context.Context) (int64, error) - GetContainerLayerSize(containerID string) (int64, int64) + GetContainerLayerSize(ctx context.Context, containerID string) (int64, int64, error) // Windows specific diff --git a/daemon/images/image_unix.go b/daemon/images/image_unix.go index aa9a4a01e4757..dce61a5a305d9 100644 --- a/daemon/images/image_unix.go +++ b/daemon/images/image_unix.go @@ -4,6 +4,8 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" + 
"github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/sirupsen/logrus" @@ -16,7 +18,7 @@ func (i *ImageService) GetLayerFolders(img *image.Image, rwLayer layer.RWLayer) } // GetContainerLayerSize returns the real size & virtual size of the container. -func (i *ImageService) GetContainerLayerSize(containerID string) (int64, int64) { +func (i *ImageService) GetContainerLayerSize(ctx context.Context, containerID string) (int64, int64, error) { var ( sizeRw, sizeRootfs int64 err error @@ -27,7 +29,7 @@ func (i *ImageService) GetContainerLayerSize(containerID string) (int64, int64) rwlayer, err := i.layerStore.GetRWLayer(containerID) if err != nil { logrus.Errorf("Failed to compute size of container rootfs %v: %v", containerID, err) - return sizeRw, sizeRootfs + return sizeRw, sizeRootfs, nil } defer i.layerStore.ReleaseRWLayer(rwlayer) @@ -46,5 +48,5 @@ func (i *ImageService) GetContainerLayerSize(containerID string) (int64, int64) sizeRootfs += sizeRw } } - return sizeRw, sizeRootfs + return sizeRw, sizeRootfs, nil } diff --git a/daemon/images/image_windows.go b/daemon/images/image_windows.go index 035d7b7139a45..d69674cbea281 100644 --- a/daemon/images/image_windows.go +++ b/daemon/images/image_windows.go @@ -1,6 +1,8 @@ package images import ( + "context" + "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/docker/docker/pkg/system" @@ -8,9 +10,9 @@ import ( ) // GetContainerLayerSize returns real size & virtual size -func (i *ImageService) GetContainerLayerSize(containerID string) (int64, int64) { +func (i *ImageService) GetContainerLayerSize(ctx context.Context, containerID string) (int64, int64, error) { // TODO Windows - return 0, 0 + return 0, 0, nil } // GetLayerFolders returns the layer folders from an image RootFS diff --git a/daemon/inspect.go b/daemon/inspect.go index 17331be232ca1..c873cf315e2c1 100644 --- a/daemon/inspect.go +++ b/daemon/inspect.go @@ -1,6 +1,7 @@ package daemon // import 
"github.com/docker/docker/daemon" import ( + "context" "errors" "fmt" "time" @@ -19,19 +20,19 @@ import ( // ContainerInspect returns low-level information about a // container. Returns an error if the container cannot be found, or if // there is an error getting the data. -func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (interface{}, error) { +func (daemon *Daemon) ContainerInspect(ctx context.Context, name string, size bool, version string) (interface{}, error) { switch { case versions.LessThan(version, "1.20"): - return daemon.containerInspectPre120(name) + return daemon.containerInspectPre120(ctx, name) case versions.Equal(version, "1.20"): return daemon.containerInspect120(name) } - return daemon.ContainerInspectCurrent(name, size) + return daemon.ContainerInspectCurrent(ctx, name, size) } // ContainerInspectCurrent returns low-level information about a // container in a most recent api version. -func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { +func (daemon *Daemon) ContainerInspectCurrent(ctx context.Context, name string, size bool) (*types.ContainerJSON, error) { ctr, err := daemon.GetContainer(name) if err != nil { return nil, err @@ -78,7 +79,10 @@ func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.Co ctr.Unlock() if size { - sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(base.ID) + sizeRw, sizeRootFs, err := daemon.imageService.GetContainerLayerSize(ctx, base.ID) + if err != nil { + return nil, err + } base.SizeRw = &sizeRw base.SizeRootFs = &sizeRootFs } diff --git a/daemon/inspect_linux.go b/daemon/inspect_linux.go index 049a7f743f21e..1e334804befe1 100644 --- a/daemon/inspect_linux.go +++ b/daemon/inspect_linux.go @@ -1,6 +1,8 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" 
"github.com/docker/docker/api/types/versions/v1p19" @@ -19,7 +21,7 @@ func setPlatformSpecificContainerFields(container *container.Container, contJSON } // containerInspectPre120 gets containers for pre 1.20 APIs. -func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { +func (daemon *Daemon) containerInspectPre120(ctx context.Context, name string) (*v1p19.ContainerJSON, error) { ctr, err := daemon.GetContainer(name) if err != nil { return nil, err diff --git a/daemon/inspect_windows.go b/daemon/inspect_windows.go index 12fda670dfaf6..10fd5cb5178d9 100644 --- a/daemon/inspect_windows.go +++ b/daemon/inspect_windows.go @@ -1,6 +1,8 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/container" @@ -13,8 +15,8 @@ func setPlatformSpecificContainerFields(container *container.Container, contJSON } // containerInspectPre120 get containers for pre 1.20 APIs. 
-func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { - return daemon.ContainerInspectCurrent(name, false) +func (daemon *Daemon) containerInspectPre120(ctx context.Context, name string) (*types.ContainerJSON, error) { + return daemon.ContainerInspectCurrent(ctx, name, false) } func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { diff --git a/daemon/list.go b/daemon/list.go index d3fadc3490634..a2e0601dc5f15 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -234,7 +234,10 @@ func (daemon *Daemon) reducePsContainer(ctx context.Context, container *containe // release lock because size calculation is slow if filter.Size { - sizeRw, sizeRootFs := daemon.imageService.GetContainerLayerSize(newC.ID) + sizeRw, sizeRootFs, err := daemon.imageService.GetContainerLayerSize(ctx, newC.ID) + if err != nil { + return nil, err + } newC.SizeRw = sizeRw newC.SizeRootFs = sizeRootFs } diff --git a/daemon/prune.go b/daemon/prune.go index e3ef4668bee75..159d60c1d50b9 100644 --- a/daemon/prune.go +++ b/daemon/prune.go @@ -73,9 +73,12 @@ func (daemon *Daemon) ContainersPrune(ctx context.Context, pruneFilters filters. if !matchLabels(pruneFilters, c.Config.Labels) { continue } - cSize, _ := daemon.imageService.GetContainerLayerSize(c.ID) + cSize, _, err := daemon.imageService.GetContainerLayerSize(ctx, c.ID) + if err != nil { + return nil, err + } // TODO: sets RmLink to true? 
- err := daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) + err = daemon.ContainerRm(c.ID, &types.ContainerRmConfig{}) if err != nil { logrus.Warnf("failed to prune container %s: %v", c.ID, err) continue From df9e5a26f4da8936991a8fcbdd3734af8250a5c0 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Mon, 25 Jul 2022 14:22:05 +0200 Subject: [PATCH 14/90] Implement run using the containerd snapshotter Signed-off-by: Djordje Lukic --- daemon/containerd/image.go | 4 ++++ daemon/create.go | 34 +++++++++++++++++++++++++++++----- daemon/daemon.go | 6 ++++++ daemon/oci_linux.go | 28 +++++++++++++++++++++++----- daemon/start.go | 9 ++++++++- 5 files changed, 70 insertions(+), 11 deletions(-) diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index 33ae73a5f21e7..6e325044ef891 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -21,6 +21,10 @@ import ( var shortID = regexp.MustCompile(`^([a-f0-9]{4,64})$`) +func (i *ImageService) GetContainerdImage(ctx context.Context, refOrID string, platform *ocispec.Platform) (containerdimages.Image, error) { + return i.resolveImageName2(ctx, refOrID) +} + // GetImage returns an image corresponding to the image referred to by refOrID. 
func (i *ImageService) GetImage(ctx context.Context, refOrID string, platform *ocispec.Platform) (*image.Image, error) { desc, err := i.ResolveImage(ctx, refOrID) diff --git a/daemon/create.go b/daemon/create.go index d2189c9607f03..b75cb04007ae7 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -8,6 +8,8 @@ import ( "strings" "time" + "github.com/containerd/containerd" + containerdimages "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" @@ -19,6 +21,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/system" "github.com/docker/docker/runconfig" + "github.com/opencontainers/image-spec/identity" v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/opencontainers/selinux/go-selinux" "github.com/pkg/errors" @@ -32,6 +35,10 @@ type createOpts struct { ignoreImagesArgsEscaped bool } +type containerdImage interface { + GetContainerdImage(ctx context.Context, refOrID string, platform *v1.Platform) (containerdimages.Image, error) +} + // CreateManagedContainer creates a container that is managed by a Service func (daemon *Daemon) CreateManagedContainer(ctx context.Context, params types.ContainerCreateConfig) (containertypes.CreateResponse, error) { return daemon.containerCreate(ctx, createOpts{ @@ -170,12 +177,29 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai ctr.HostConfig.StorageOpt = opts.params.HostConfig.StorageOpt - // Set RWLayer for container after mount labels have been set - rwLayer, err := daemon.imageService.CreateLayer(ctr, setupInitLayer(daemon.idMapping)) - if err != nil { - return nil, errdefs.System(err) + if daemon.UsesSnapshotter() { + c8dImge, err := daemon.imageService.(containerdImage).GetContainerdImage(ctx, opts.params.Config.Image, opts.params.Platform) + if err != nil { + return nil, err + } + ctrdimg := 
containerd.NewImage(daemon.containerdCli, c8dImge) + diffIDs, err := ctrdimg.RootFS(ctx) + if err != nil { + return nil, err + } + parent := identity.ChainID(diffIDs).String() + s := daemon.containerdCli.SnapshotService(containerd.DefaultSnapshotter) + if _, err := s.Prepare(ctx, ctr.ID, parent); err != nil { + return nil, err + } + } else { + // Set RWLayer for container after mount labels have been set + rwLayer, err := daemon.imageService.CreateLayer(ctr, setupInitLayer(daemon.idMapping)) + if err != nil { + return nil, errdefs.System(err) + } + ctr.RWLayer = rwLayer } - ctr.RWLayer = rwLayer current := idtools.CurrentIdentity() if err := idtools.MkdirAndChown(ctr.Root, 0710, idtools.Identity{UID: current.UID, GID: daemon.IdentityMapping().RootPair().GID}); err != nil { diff --git a/daemon/daemon.go b/daemon/daemon.go index 9a6cb891c5992..6d104821042fe 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -1262,6 +1262,9 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error { // Mount sets container.BaseFS // (is it not set coming in? why is it unset?) 
func (daemon *Daemon) Mount(container *container.Container) error { + if daemon.UsesSnapshotter() { + return nil + } if container.RWLayer == nil { return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil") } @@ -1287,6 +1290,9 @@ func (daemon *Daemon) Mount(container *container.Container) error { // Unmount unsets the container base filesystem func (daemon *Daemon) Unmount(container *container.Container) error { + if daemon.UsesSnapshotter() { + return nil + } if container.RWLayer == nil { return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil") } diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go index 01fed13295000..8a61da2c2cebb 100644 --- a/daemon/oci_linux.go +++ b/daemon/oci_linux.go @@ -11,6 +11,7 @@ import ( "strings" cdcgroups "github.com/containerd/cgroups" + "github.com/containerd/containerd" "github.com/containerd/containerd/containers" coci "github.com/containerd/containerd/oci" "github.com/containerd/containerd/pkg/apparmor" @@ -732,9 +733,11 @@ func WithCommonOptions(daemon *Daemon, c *container.Container) coci.SpecOpts { Path: c.BaseFS.Path(), Readonly: c.HostConfig.ReadonlyRootfs, } - } - if err := c.SetupWorkingDirectory(daemon.idMapping.RootPair()); err != nil { - return err + if err := c.SetupWorkingDirectory(daemon.idMapping.RootPair()); err != nil { + return err + } + } else { + } cwd := c.Config.WorkingDir if len(cwd) == 0 { @@ -1018,7 +1021,6 @@ func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (r WithResources(c), WithSysctls(c), WithDevices(daemon, c), - WithUser(c), WithRlimits(daemon, c), WithNamespaces(daemon, c), WithCapabilities(c), @@ -1029,6 +1031,20 @@ func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (r WithSelinux(c), WithOOMScore(&c.HostConfig.OomScoreAdj), ) + if daemon.UsesSnapshotter() { + s.Root = &specs.Root{ + Path: "rootfs", + } + if c.Config.User != "" { + opts = append(opts, coci.WithUser(c.Config.User)) + } + 
if c.Config.WorkingDir != "" { + opts = append(opts, coci.WithProcessCwd(c.Config.WorkingDir)) + } + } else { + opts = append(opts, WithUser(c)) + } + if c.NoNewPrivileges { opts = append(opts, coci.WithNoNewPrivileges) } @@ -1046,7 +1062,9 @@ func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (r opts = append(opts, WithRootless(daemon)) } return &s, coci.ApplyOpts(context.Background(), nil, &containers.Container{ - ID: c.ID, + ID: c.ID, + Snapshotter: containerd.DefaultSnapshotter, + SnapshotKey: c.ID, }, &s, opts...) } diff --git a/daemon/start.go b/daemon/start.go index 4ce58a7c00e76..f8b5e7ed2bf06 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -5,6 +5,7 @@ import ( "runtime" "time" + "github.com/containerd/containerd" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" @@ -176,7 +177,13 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C return err } - err = daemon.containerd.Create(ctx, container.ID, spec, shim, createOptions) + newContainerOpts := []containerd.NewContainerOpts{} + if daemon.UsesSnapshotter() { + newContainerOpts = append(newContainerOpts, containerd.WithSnapshotter(containerd.DefaultSnapshotter)) + newContainerOpts = append(newContainerOpts, containerd.WithSnapshot(container.ID)) + } + + err = daemon.containerd.Create(ctx, container.ID, spec, shim, createOptions, newContainerOpts...) 
if err != nil { if errdefs.IsConflict(err) { logrus.WithError(err).WithField("container", container.ID).Error("Container not cleaned up from containerd from previous run") From c89f274a6f2084e2b217154ef4b5efc4a62b5c87 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Mon, 25 Jul 2022 17:24:03 +0200 Subject: [PATCH 15/90] introduce GetImageOpts to manage image inspect data in backend Signed-off-by: Nicolas De Loof --- api/server/router/image/backend.go | 4 +- api/server/router/image/image_routes.go | 37 ++++------------ api/types/image/opts.go | 9 ++++ daemon/cluster/executor/backend.go | 3 +- daemon/cluster/executor/container/adapter.go | 3 +- daemon/containerd/image.go | 46 +++++++++++++++++--- daemon/containerd/image_history.go | 8 +++- daemon/create.go | 5 ++- daemon/image_service.go | 4 +- daemon/images/cache.go | 3 +- daemon/images/image.go | 40 ++++++++++++++++- daemon/images/image_builder.go | 5 ++- daemon/images/image_delete.go | 3 +- daemon/images/image_events.go | 3 +- daemon/images/image_history.go | 7 +-- daemon/images/image_list.go | 8 ++-- daemon/images/image_pull.go | 3 +- daemon/images/image_tag.go | 3 +- daemon/list.go | 5 ++- daemon/oci_windows.go | 3 +- image/image.go | 11 +++++ 21 files changed, 150 insertions(+), 63 deletions(-) create mode 100644 api/types/image/opts.go diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index 5253c8b71d7d1..84ea9da4cd5b7 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -22,9 +22,9 @@ type Backend interface { type imageBackend interface { ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) - ImageHistory(imageName string) ([]*image.HistoryResponseItem, error) + ImageHistory(ctx context.Context, imageName string) ([]*image.HistoryResponseItem, error) Images(ctx context.Context, opts types.ImageListOptions) ([]*types.ImageSummary, error) - GetImage(ctx context.Context, refOrID 
string, platform *specs.Platform) (*dockerimage.Image, error) + GetImage(ctx context.Context, refOrID string, options image.GetImageOpts) (*dockerimage.Image, error) TagImage(ctx context.Context, imageName, repository, tag string) (string, error) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) } diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index 0e3cfcddce80b..0ce48dee5cabb 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -12,11 +12,11 @@ import ( "github.com/docker/docker/api/server/httputils" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + opts "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/versions" "github.com/docker/docker/errdefs" "github.com/docker/docker/image" - "github.com/docker/docker/layer" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/streamformatter" specs "github.com/opencontainers/image-spec/specs-go/v1" @@ -193,7 +193,7 @@ func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r } func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - image, err := s.backend.GetImage(ctx, vars["name"], nil) + image, err := s.backend.GetImage(ctx, vars["name"], opts.GetImageOpts{Details: true}) if err != nil { return err } @@ -219,32 +219,11 @@ func (s *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, err } } - var size int64 - var layerMetadata map[string]string - layerID := img.RootFS.ChainID() - if layerID != "" { - l, err := s.layerStore.Get(layerID) - if err != nil { - return nil, err - } - defer layer.ReleaseAndLog(s.layerStore, l) - size = l.Size() - layerMetadata, err = l.Metadata() - if err != nil { - return nil, err - } - } - comment := img.Comment if 
len(comment) == 0 && len(img.History) > 0 { comment = img.History[len(img.History)-1].Comment } - lastUpdated, err := s.imageStore.GetLastUpdated(img.ID()) - if err != nil { - return nil, err - } - return &types.ImageInspect{ ID: img.ID().String(), RepoTags: repoTags, @@ -261,15 +240,15 @@ func (s *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, err Variant: img.Variant, Os: img.OperatingSystem(), OsVersion: img.OSVersion, - Size: size, - VirtualSize: size, // TODO: field unused, deprecate + Size: img.Details.Size, + VirtualSize: img.Details.Size, // TODO: field unused, deprecate GraphDriver: types.GraphDriverData{ - Name: s.layerStore.DriverName(), - Data: layerMetadata, + Name: img.Details.Driver, + Data: img.Details.Metadata, }, RootFS: rootFSToAPIType(img.RootFS), Metadata: types.ImageMetadata{ - LastTagTime: lastUpdated, + LastTagTime: img.Details.LastUpdated, }, }, nil } @@ -324,7 +303,7 @@ func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { name := vars["name"] - history, err := s.backend.ImageHistory(name) + history, err := s.backend.ImageHistory(ctx, name) if err != nil { return err } diff --git a/api/types/image/opts.go b/api/types/image/opts.go new file mode 100644 index 0000000000000..a24f9059ab4f7 --- /dev/null +++ b/api/types/image/opts.go @@ -0,0 +1,9 @@ +package image + +import specs "github.com/opencontainers/image-spec/specs-go/v1" + +// GetImageOpts holds parameters to inspect an image. 
+type GetImageOpts struct { + Platform *specs.Platform + Details bool +} diff --git a/daemon/cluster/executor/backend.go b/daemon/cluster/executor/backend.go index 101e1f8283a31..f987482e0eeb5 100644 --- a/daemon/cluster/executor/backend.go +++ b/daemon/cluster/executor/backend.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" + opts "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" @@ -76,5 +77,5 @@ type VolumeBackend interface { type ImageBackend interface { PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error GetRepository(context.Context, reference.Named, *registry.AuthConfig) (distribution.Repository, error) - GetImage(ctx context.Context, refOrID string, platform *specs.Platform) (retImg *image.Image, retErr error) + GetImage(ctx context.Context, refOrID string, options opts.GetImageOpts) (retImg *image.Image, retErr error) } diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go index 29693820f3a5a..9244beec203ce 100644 --- a/daemon/cluster/executor/container/adapter.go +++ b/daemon/cluster/executor/container/adapter.go @@ -16,6 +16,7 @@ import ( "github.com/docker/docker/api/types/backend" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" + imagetypes "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/registry" containerpkg "github.com/docker/docker/container" "github.com/docker/docker/daemon" @@ -75,7 +76,7 @@ func (c *containerAdapter) pullImage(ctx context.Context) error { named, err := reference.ParseNormalizedNamed(spec.Image) if err == nil { if _, ok := 
named.(reference.Canonical); ok { - _, err := c.imageBackend.GetImage(ctx, spec.Image, nil) + _, err := c.imageBackend.GetImage(ctx, spec.Image, imagetypes.GetImageOpts{}) if err == nil { return nil } diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index 6e325044ef891..ee19399379391 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -12,8 +12,10 @@ import ( containerdimages "github.com/containerd/containerd/images" "github.com/docker/distribution/reference" containertypes "github.com/docker/docker/api/types/container" + imagetype "github.com/docker/docker/api/types/image" "github.com/docker/docker/errdefs" "github.com/docker/docker/image" + "github.com/docker/docker/layer" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -26,34 +28,63 @@ func (i *ImageService) GetContainerdImage(ctx context.Context, refOrID string, p } // GetImage returns an image corresponding to the image referred to by refOrID. 
-func (i *ImageService) GetImage(ctx context.Context, refOrID string, platform *ocispec.Platform) (*image.Image, error) { - desc, err := i.ResolveImage(ctx, refOrID) +func (i *ImageService) GetImage(ctx context.Context, refOrID string, options imagetype.GetImageOpts) (*image.Image, error) { + ii, img, err := i.getImage(ctx, refOrID, options.Platform) if err != nil { return nil, err } + if options.Details { + size, err := ii.Size(ctx) + if err != nil { + return nil, err + } + img.Details = &image.Details{ + Size: size, + Metadata: nil, + Driver: i.GraphDriverName(), + LastUpdated: ii.Metadata().UpdatedAt, + } + } + return img, err +} + +func (i *ImageService) getImage(ctx context.Context, refOrID string, platform *ocispec.Platform) (containerd.Image, *image.Image, error) { + desc, err := i.ResolveImage(ctx, refOrID) + if err != nil { + return nil, nil, err + } + ctrdimg, err := i.resolveImageName2(ctx, refOrID) if err != nil { - return nil, err + return nil, nil, err } ii := containerd.NewImage(i.client, ctrdimg) provider := i.client.ContentStore() conf, err := ctrdimg.Config(ctx, provider, ii.Platform()) if err != nil { - return nil, err + return nil, nil, err } var ociimage ocispec.Image imageConfigBytes, err := content.ReadBlob(ctx, ii.ContentStore(), conf) if err != nil { - return nil, err + return nil, nil, err } if err := json.Unmarshal(imageConfigBytes, &ociimage); err != nil { - return nil, err + return nil, nil, err } - return &image.Image{ + fs, err := ii.RootFS(ctx) + if err != nil { + return nil, nil, err + } + rootfs := image.NewRootFS() + for _, id := range fs { + rootfs.Append(layer.DiffID(id)) + } + return ii, &image.Image{ V1Image: image.V1Image{ ID: string(desc.Digest), OS: ociimage.OS, @@ -66,6 +97,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, platform *o WorkingDir: ociimage.Config.WorkingDir, }, }, + RootFS: rootfs, }, nil } diff --git a/daemon/containerd/image_history.go b/daemon/containerd/image_history.go index 
d684a322bf026..eef6c8ce391c0 100644 --- a/daemon/containerd/image_history.go +++ b/daemon/containerd/image_history.go @@ -1,9 +1,13 @@ package containerd -import imagetype "github.com/docker/docker/api/types/image" +import ( + "context" + + imagetype "github.com/docker/docker/api/types/image" +) // ImageHistory returns a slice of ImageHistory structures for the specified // image name by walking the image lineage. -func (i *ImageService) ImageHistory(name string) ([]*imagetype.HistoryResponseItem, error) { +func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*imagetype.HistoryResponseItem, error) { panic("not implemented") } diff --git a/daemon/create.go b/daemon/create.go index b75cb04007ae7..5b2ec16c0d6f1 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -13,6 +13,7 @@ import ( "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" + imagetypes "github.com/docker/docker/api/types/image" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/container" "github.com/docker/docker/daemon/images" @@ -76,7 +77,7 @@ func (daemon *Daemon) containerCreate(ctx context.Context, opts createOpts) (con } if opts.params.Platform == nil && opts.params.Config.Image != "" { - if img, _ := daemon.imageService.GetImage(ctx, opts.params.Config.Image, opts.params.Platform); img != nil { + if img, _ := daemon.imageService.GetImage(ctx, opts.params.Config.Image, imagetypes.GetImageOpts{Platform: opts.params.Platform}); img != nil { p := maximumSpec() imgPlat := v1.Platform{ OS: img.OS, @@ -127,7 +128,7 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai ) if opts.params.Config.Image != "" { - img, err = daemon.imageService.GetImage(ctx, opts.params.Config.Image, opts.params.Platform) + img, err = daemon.imageService.GetImage(ctx, opts.params.Config.Image, imagetypes.GetImageOpts{Platform: opts.params.Platform}) 
if err != nil { return nil, err } diff --git a/daemon/image_service.go b/daemon/image_service.go index 84baa23a44d90..686cc18ab8425 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -40,8 +40,8 @@ type ImageService interface { ImportImage(ctx context.Context, src string, repository string, platform *v1.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error TagImage(ctx context.Context, imageName, repository, tag string) (string, error) TagImageWithReference(ctx context.Context, imageID image.ID, newTag reference.Named) error - GetImage(ctx context.Context, refOrID string, platform *v1.Platform) (*image.Image, error) - ImageHistory(name string) ([]*imagetype.HistoryResponseItem, error) + GetImage(ctx context.Context, refOrID string, options imagetype.GetImageOpts) (*image.Image, error) + ImageHistory(ctx context.Context, name string) ([]*imagetype.HistoryResponseItem, error) CommitImage(c backend.CommitConfig) (image.ID, error) SquashImage(id, parent string) (string, error) diff --git a/daemon/images/cache.go b/daemon/images/cache.go index 000d73211861a..858f2b9b4376c 100644 --- a/daemon/images/cache.go +++ b/daemon/images/cache.go @@ -3,6 +3,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( "context" + imagetypes "github.com/docker/docker/api/types/image" "github.com/docker/docker/builder" "github.com/docker/docker/image/cache" "github.com/sirupsen/logrus" @@ -17,7 +18,7 @@ func (i *ImageService) MakeImageCache(ctx context.Context, sourceRefs []string) cache := cache.New(i.imageStore) for _, ref := range sourceRefs { - img, err := i.GetImage(ctx, ref, nil) + img, err := i.GetImage(ctx, ref, imagetypes.GetImageOpts{}) if err != nil { logrus.Warnf("Could not look up %s for cache resolution, skipping: %+v", ref, err) continue diff --git a/daemon/images/image.go b/daemon/images/image.go index 974bf1e7d088b..3bb37a0f8a3c4 100644 --- a/daemon/images/image.go +++ 
b/daemon/images/image.go @@ -12,8 +12,10 @@ import ( "github.com/containerd/containerd/leases" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" + imagetypes "github.com/docker/docker/api/types/image" "github.com/docker/docker/errdefs" "github.com/docker/docker/image" + "github.com/docker/docker/layer" "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -148,7 +150,43 @@ func (i *ImageService) manifestMatchesPlatform(img *image.Image, platform specs. } // GetImage returns an image corresponding to the image referred to by refOrID. -func (i *ImageService) GetImage(ctx context.Context, refOrID string, platform *specs.Platform) (retImg *image.Image, retErr error) { +func (i *ImageService) GetImage(ctx context.Context, refOrID string, options imagetypes.GetImageOpts) (*image.Image, error) { + img, err := i.getImage(refOrID, options.Platform) + if err != nil { + return nil, err + } + if options.Details { + var size int64 + var layerMetadata map[string]string + layerID := img.RootFS.ChainID() + if layerID != "" { + l, err := i.layerStore.Get(layerID) + if err != nil { + return nil, err + } + defer layer.ReleaseAndLog(i.layerStore, l) + size = l.Size() + layerMetadata, err = l.Metadata() + if err != nil { + return nil, err + } + } + + lastUpdated, err := i.imageStore.GetLastUpdated(img.ID()) + if err != nil { + return nil, err + } + img.Details = &image.Details{ + Size: size, + Metadata: layerMetadata, + Driver: i.layerStore.DriverName(), + LastUpdated: lastUpdated, + } + } + return img, nil +} + +func (i *ImageService) getImage(refOrID string, platform *specs.Platform) (retImg *image.Image, retErr error) { defer func() { if retErr != nil || retImg == nil || platform == nil { return diff --git a/daemon/images/image_builder.go b/daemon/images/image_builder.go index 430a589ae9088..1df49a65825d8 100644 --- a/daemon/images/image_builder.go +++ 
b/daemon/images/image_builder.go @@ -8,6 +8,7 @@ import ( "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/backend" + imagetypes "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/builder" "github.com/docker/docker/errdefs" @@ -167,7 +168,7 @@ func (i *ImageService) pullForBuilder(ctx context.Context, name string, authConf return nil, err } - img, err := i.GetImage(nil, name, platform) + img, err := i.GetImage(nil, name, imagetypes.GetImageOpts{Platform: platform}) if errdefs.IsNotFound(err) && img != nil && platform != nil { imgPlat := specs.Platform{ OS: img.OS, @@ -211,7 +212,7 @@ func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID s } if opts.PullOption != backend.PullOptionForcePull { - img, err := i.GetImage(nil, refOrID, opts.Platform) + img, err := i.GetImage(nil, refOrID, imagetypes.GetImageOpts{Platform: opts.Platform}) if err != nil && opts.PullOption == backend.PullOptionNoPull { return nil, nil, err } diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index 2d3cabe264f50..b66bb7716736c 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -8,6 +8,7 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + imagetypes "github.com/docker/docker/api/types/image" "github.com/docker/docker/container" "github.com/docker/docker/errdefs" "github.com/docker/docker/image" @@ -63,7 +64,7 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, start := time.Now() records := []types.ImageDeleteResponseItem{} - img, err := i.GetImage(ctx, imageRef, nil) + img, err := i.GetImage(ctx, imageRef, imagetypes.GetImageOpts{}) if err != nil { return nil, err } diff --git a/daemon/images/image_events.go b/daemon/images/image_events.go index e89a7c49c7e5e..294f3b4b3d7d7 100644 --- 
a/daemon/images/image_events.go +++ b/daemon/images/image_events.go @@ -2,6 +2,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( "github.com/docker/docker/api/types/events" + imagetypes "github.com/docker/docker/api/types/image" ) // LogImageEvent generates an event related to an image with only the default attributes. @@ -11,7 +12,7 @@ func (i *ImageService) LogImageEvent(imageID, refName, action string) { // LogImageEventWithAttributes generates an event related to an image with specific given attributes. func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { - img, err := i.GetImage(nil, imageID, nil) + img, err := i.GetImage(nil, imageID, imagetypes.GetImageOpts{}) if err == nil && img.Config != nil { // image has not been removed yet. // it could be missing if the event is `delete`. diff --git a/daemon/images/image_history.go b/daemon/images/image_history.go index b092b2f76f5ff..8a9f03d95005e 100644 --- a/daemon/images/image_history.go +++ b/daemon/images/image_history.go @@ -1,6 +1,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" "fmt" "time" @@ -11,9 +12,9 @@ import ( // ImageHistory returns a slice of ImageHistory structures for the specified image // name by walking the image lineage. 
-func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, error) { +func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*image.HistoryResponseItem, error) { start := time.Now() - img, err := i.GetImage(nil, name, nil) + img, err := i.GetImage(ctx, name, image.GetImageOpts{}) if err != nil { return nil, err } @@ -69,7 +70,7 @@ func (i *ImageService) ImageHistory(name string) ([]*image.HistoryResponseItem, if id == "" { break } - histImg, err = i.GetImage(nil, id.String(), nil) + histImg, err = i.GetImage(ctx, id.String(), image.GetImageOpts{}) if err != nil { break } diff --git a/daemon/images/image_list.go b/daemon/images/image_list.go index 343a539672583..d994f13f4791d 100644 --- a/daemon/images/image_list.go +++ b/daemon/images/image_list.go @@ -5,6 +5,8 @@ import ( "fmt" "sort" + imagetypes "github.com/docker/docker/api/types/image" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/container" @@ -30,7 +32,7 @@ func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } // Images returns a filtered list of images. 
-func (i *ImageService) Images(_ context.Context, opts types.ImageListOptions) ([]*types.ImageSummary, error) { +func (i *ImageService) Images(ctx context.Context, opts types.ImageListOptions) ([]*types.ImageSummary, error) { if err := opts.Filters.Validate(acceptedImageFilterTags); err != nil { return nil, err } @@ -49,7 +51,7 @@ func (i *ImageService) Images(_ context.Context, opts types.ImageListOptions) ([ err error ) err = opts.Filters.WalkValues("before", func(value string) error { - beforeFilter, err = i.GetImage(nil, value, nil) + beforeFilter, err = i.GetImage(ctx, value, imagetypes.GetImageOpts{}) return err }) if err != nil { @@ -57,7 +59,7 @@ func (i *ImageService) Images(_ context.Context, opts types.ImageListOptions) ([ } err = opts.Filters.WalkValues("since", func(value string) error { - sinceFilter, err = i.GetImage(nil, value, nil) + sinceFilter, err = i.GetImage(ctx, value, imagetypes.GetImageOpts{}) return err }) if err != nil { diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index 18b33d0227405..ee8be2e8422d3 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -10,6 +10,7 @@ import ( "github.com/containerd/containerd/namespaces" dist "github.com/docker/distribution" "github.com/docker/distribution/reference" + imagetypes "github.com/docker/docker/api/types/image" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/distribution" progressutils "github.com/docker/docker/distribution/utils" @@ -63,7 +64,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tag string, platfor // we allow the image to have a non-matching architecture. The code // below checks for this situation, and returns a warning to the client, // as well as logging it to the daemon logs. 
- img, err := i.GetImage(nil, image, platform) + img, err := i.GetImage(ctx, image, imagetypes.GetImageOpts{Platform: platform}) // Note that this is a special case where GetImage returns both an image // and an error: https://github.com/docker/docker/blob/v20.10.7/daemon/images/image.go#L175-L183 diff --git a/daemon/images/image_tag.go b/daemon/images/image_tag.go index 708b2689398f7..81d00d35fee8e 100644 --- a/daemon/images/image_tag.go +++ b/daemon/images/image_tag.go @@ -4,13 +4,14 @@ import ( "context" "github.com/docker/distribution/reference" + imagetypes "github.com/docker/docker/api/types/image" "github.com/docker/docker/image" ) // TagImage creates the tag specified by newTag, pointing to the image named // imageName (alternatively, imageName can also be an image ID). func (i *ImageService) TagImage(ctx context.Context, imageName, repository, tag string) (string, error) { - img, err := i.GetImage(nil, imageName, nil) + img, err := i.GetImage(ctx, imageName, imagetypes.GetImageOpts{}) if err != nil { return "", err } diff --git a/daemon/list.go b/daemon/list.go index a2e0601dc5f15..db9869c8c8297 100644 --- a/daemon/list.go +++ b/daemon/list.go @@ -9,6 +9,7 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + imagetypes "github.com/docker/docker/api/types/image" "github.com/docker/docker/container" "github.com/docker/docker/daemon/images" "github.com/docker/docker/errdefs" @@ -321,7 +322,7 @@ func (daemon *Daemon) foldFilter(ctx context.Context, view container.View, confi if psFilters.Contains("ancestor") { ancestorFilter = true psFilters.WalkValues("ancestor", func(ancestor string) error { - img, err := daemon.imageService.GetImage(ctx, ancestor, nil) + img, err := daemon.imageService.GetImage(ctx, ancestor, imagetypes.GetImageOpts{}) if err != nil { logrus.Warnf("Error while looking up for image %v", ancestor) return nil @@ -585,7 +586,7 @@ func (daemon *Daemon) refreshImage(ctx context.Context, s 
*container.Snapshot, f c := s.Container image := s.Image // keep the original ref if still valid (hasn't changed) if image != s.ImageID { - img, err := daemon.imageService.GetImage(ctx, image, nil) + img, err := daemon.imageService.GetImage(ctx, image, imagetypes.GetImageOpts{}) if _, isDNE := err.(images.ErrImageDoesNotExist); err != nil && !isDNE { return nil, err } diff --git a/daemon/oci_windows.go b/daemon/oci_windows.go index 432933965625a..225c9942e1d03 100644 --- a/daemon/oci_windows.go +++ b/daemon/oci_windows.go @@ -9,6 +9,7 @@ import ( "strings" containertypes "github.com/docker/docker/api/types/container" + imagetypes "github.com/docker/docker/api/types/image" "github.com/docker/docker/container" "github.com/docker/docker/errdefs" "github.com/docker/docker/oci" @@ -27,7 +28,7 @@ const ( func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (*specs.Spec, error) { - img, err := daemon.imageService.GetImage(ctx, string(c.ImageID), nil) + img, err := daemon.imageService.GetImage(ctx, string(c.ImageID), imagetypes.GetImageOpts{}) if err != nil { return nil, err } diff --git a/image/image.go b/image/image.go index 99e8af0cc78cb..25179a1c8a693 100644 --- a/image/image.go +++ b/image/image.go @@ -112,6 +112,17 @@ type Image struct { // computedID is the ID computed from the hash of the image config. // Not to be confused with the legacy V1 ID in V1Image. computedID ID + + // Details holds additional details about image + Details *Details `json:"-"` +} + +// Details provides additional image data +type Details struct { + Size int64 + Metadata map[string]string + Driver string + LastUpdated time.Time } // RawJSON returns the immutable JSON associated with the image. 
From b7fe4ead03b0e0108810a111ab5fde4740033901 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Thu, 28 Jul 2022 11:23:28 +0200 Subject: [PATCH 16/90] implement docker system df Signed-off-by: Nicolas De Loof --- api/server/router/system/system_routes.go | 2 +- daemon/containerd/service.go | 50 ++++++++++++++++++++++- 2 files changed, 49 insertions(+), 3 deletions(-) diff --git a/api/server/router/system/system_routes.go b/api/server/router/system/system_routes.go index bb7897d229103..a9423122fc7a0 100644 --- a/api/server/router/system/system_routes.go +++ b/api/server/router/system/system_routes.go @@ -116,7 +116,7 @@ func (s *systemRouter) getDiskUsage(ctx context.Context, w http.ResponseWriter, var getContainers, getImages, getVolumes, getBuildCache bool typeStrs, ok := r.Form["type"] if versions.LessThan(version, "1.42") || !ok { - getContainers, getImages, getVolumes, getBuildCache = true, true, true, true + getContainers, getImages, getVolumes, getBuildCache = true, true, true, s.builder != nil } else { for _, typ := range typeStrs { switch types.DiskUsageObject(typ) { diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index bd7df68fb4368..1c0686e74423a 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -2,20 +2,25 @@ package containerd import ( "context" + "fmt" "github.com/containerd/containerd" + "github.com/containerd/containerd/snapshots" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" "github.com/docker/docker/container" "github.com/docker/docker/daemon/images" "github.com/docker/docker/image" "github.com/docker/docker/layer" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" + "golang.org/x/sync/singleflight" ) // ImageService implements daemon.ImageService type ImageService struct { client *containerd.Client + usage singleflight.Group } // NewService creates a new ImageService. 
@@ -97,12 +102,53 @@ func (i *ImageService) ReleaseLayer(rwlayer layer.RWLayer) error { // LayerDiskUsage returns the number of bytes used by layer stores // called from disk_usage.go func (i *ImageService) LayerDiskUsage(ctx context.Context) (int64, error) { - panic("not implemented") + ch := i.usage.DoChan("LayerDiskUsage", func() (interface{}, error) { + var allLayersSize int64 + snapshotter := i.client.SnapshotService(containerd.DefaultSnapshotter) + snapshotter.Walk(ctx, func(ctx context.Context, info snapshots.Info) error { + usage, err := snapshotter.Usage(ctx, info.Name) + if err != nil { + return err + } + allLayersSize += usage.Size + return nil + }) + return allLayersSize, nil + }) + select { + case <-ctx.Done(): + return 0, ctx.Err() + case res := <-ch: + if res.Err != nil { + return 0, res.Err + } + return res.Val.(int64), nil + } } // ImageDiskUsage returns information about image data disk usage. func (i *ImageService) ImageDiskUsage(ctx context.Context) ([]*types.ImageSummary, error) { - panic("not implemented") + ch := i.usage.DoChan("ImageDiskUsage", func() (interface{}, error) { + // Get all top images with extra attributes + images, err := i.Images(ctx, types.ImageListOptions{ + Filters: filters.NewArgs(), + SharedSize: true, + ContainerCount: true, + }) + if err != nil { + return nil, fmt.Errorf("failed to retrieve image list: %v", err) + } + return images, nil + }) + select { + case <-ctx.Done(): + return nil, ctx.Err() + case res := <-ch: + if res.Err != nil { + return nil, res.Err + } + return res.Val.([]*types.ImageSummary), nil + } } // UpdateConfig values From 4233177e824a3c942a38640bcef039fb9e055820 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Thu, 28 Jul 2022 16:09:32 +0200 Subject: [PATCH 17/90] introduce image.EventLogger to manage image lifecycle events Signed-off-by: Nicolas De Loof --- daemon/containerd/image_events.go | 13 ------------ daemon/image_service.go | 2 -- daemon/images/image_delete.go | 8 ++++---- 
daemon/images/image_exporter.go | 4 ++-- daemon/images/image_import.go | 2 +- daemon/images/image_pull.go | 2 +- daemon/images/image_push.go | 2 +- daemon/images/image_tag.go | 2 +- daemon/images/service.go | 8 +++++++- .../images/image_events.go => image/events.go | 20 +++++++++++-------- image/tarexport/load.go | 2 +- image/tarexport/save.go | 2 +- image/tarexport/tarexport.go | 7 ++----- 13 files changed, 33 insertions(+), 41 deletions(-) delete mode 100644 daemon/containerd/image_events.go rename daemon/images/image_events.go => image/events.go (50%) diff --git a/daemon/containerd/image_events.go b/daemon/containerd/image_events.go deleted file mode 100644 index 11e9a9900fdd4..0000000000000 --- a/daemon/containerd/image_events.go +++ /dev/null @@ -1,13 +0,0 @@ -package containerd - -// LogImageEvent generates an event related to an image with only the -// default attributes. -func (i *ImageService) LogImageEvent(imageID, refName, action string) { - panic("not implemented") -} - -// LogImageEventWithAttributes generates an event related to an image with -// specific given attributes. 
-func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { - panic("not implemented") -} diff --git a/daemon/image_service.go b/daemon/image_service.go index 686cc18ab8425..9eae28ef8fe8a 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -32,8 +32,6 @@ type ImageService interface { ExportImage(ctx context.Context, names []string, outStream io.Writer) error LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error Images(ctx context.Context, opts types.ImageListOptions) ([]*types.ImageSummary, error) - LogImageEvent(imageID, refName, action string) - LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) CountImages() int ImageDiskUsage(ctx context.Context) ([]*types.ImageSummary, error) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) diff --git a/daemon/images/image_delete.go b/daemon/images/image_delete.go index b66bb7716736c..902ba78c02610 100644 --- a/daemon/images/image_delete.go +++ b/daemon/images/image_delete.go @@ -105,7 +105,7 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} - i.LogImageEvent(imgID.String(), imgID.String(), "untag") + i.eventsLogger.LogImageEvent(imgID.String(), imgID.String(), "untag") records = append(records, untaggedRecord) repoRefs = i.referenceStore.References(imgID.Digest()) @@ -168,7 +168,7 @@ func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} - i.LogImageEvent(imgID.String(), imgID.String(), "untag") + i.eventsLogger.LogImageEvent(imgID.String(), imgID.String(), "untag") records = append(records, untaggedRecord) } } @@ -254,7 +254,7 @@ func (i *ImageService) removeAllReferencesToImageID(imgID 
image.ID, records *[]t untaggedRecord := types.ImageDeleteResponseItem{Untagged: reference.FamiliarString(parsedRef)} - i.LogImageEvent(imgID.String(), imgID.String(), "untag") + i.eventsLogger.LogImageEvent(imgID.String(), imgID.String(), "untag") *records = append(*records, untaggedRecord) } @@ -329,7 +329,7 @@ func (i *ImageService) imageDeleteHelper(imgID image.ID, records *[]types.ImageD return err } - i.LogImageEvent(imgID.String(), imgID.String(), "delete") + i.eventsLogger.LogImageEvent(imgID.String(), imgID.String(), "delete") *records = append(*records, types.ImageDeleteResponseItem{Deleted: imgID.String()}) for _, removedLayer := range removedLayers { *records = append(*records, types.ImageDeleteResponseItem{Deleted: removedLayer.ChainID.String()}) diff --git a/daemon/images/image_exporter.go b/daemon/images/image_exporter.go index 2ab4af1a83285..1a5880ec3b84b 100644 --- a/daemon/images/image_exporter.go +++ b/daemon/images/image_exporter.go @@ -13,7 +13,7 @@ import ( // the same tag are exported. names is the set of tags to export, and // outStream is the writer which the images are written to. func (i *ImageService) ExportImage(ctx context.Context, names []string, outStream io.Writer) error { - imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStore, i.referenceStore, i) + imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStore, i.referenceStore, i.eventsLogger.LogImageEvent) return imageExporter.Save(names, outStream) } @@ -21,6 +21,6 @@ func (i *ImageService) ExportImage(ctx context.Context, names []string, outStrea // complement of ExportImage. The input stream is an uncompressed tar // ball containing images and metadata. 
func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStore, i.referenceStore, i) + imageExporter := tarexport.NewTarExporter(i.imageStore, i.layerStore, i.referenceStore, i.eventsLogger.LogImageEvent) return imageExporter.Load(inTar, outStream, quiet) } diff --git a/daemon/images/image_import.go b/daemon/images/image_import.go index 8f4fa9c2e07c2..21ac82929ccaf 100644 --- a/daemon/images/image_import.go +++ b/daemon/images/image_import.go @@ -139,7 +139,7 @@ func (i *ImageService) ImportImage(ctx context.Context, src string, repository s } } - i.LogImageEvent(id.String(), id.String(), "import") + i.eventsLogger.LogImageEvent(id.String(), id.String(), "import") outStream.Write(streamformatter.FormatStatus("", id.String())) return nil } diff --git a/daemon/images/image_pull.go b/daemon/images/image_pull.go index ee8be2e8422d3..4c78a0a133917 100644 --- a/daemon/images/image_pull.go +++ b/daemon/images/image_pull.go @@ -117,7 +117,7 @@ func (i *ImageService) pullImageWithReference(ctx context.Context, ref reference AuthConfig: authConfig, ProgressOutput: progress.ChanOutput(progressChan), RegistryService: i.registryService, - ImageEventLogger: i.LogImageEvent, + ImageEventLogger: i.eventsLogger.LogImageEvent, MetadataStore: i.distributionMetadataStore, ImageStore: imageStore, ReferenceStore: i.referenceStore, diff --git a/daemon/images/image_push.go b/daemon/images/image_push.go index 1bc18672911e9..cfde1f09502f9 100644 --- a/daemon/images/image_push.go +++ b/daemon/images/image_push.go @@ -47,7 +47,7 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea AuthConfig: authConfig, ProgressOutput: progress.ChanOutput(progressChan), RegistryService: i.registryService, - ImageEventLogger: i.LogImageEvent, + ImageEventLogger: i.eventsLogger.LogImageEvent, MetadataStore: i.distributionMetadataStore, ImageStore: 
distribution.NewImageConfigStoreFromStore(i.imageStore), ReferenceStore: i.referenceStore, diff --git a/daemon/images/image_tag.go b/daemon/images/image_tag.go index 81d00d35fee8e..b9fcb0260bd55 100644 --- a/daemon/images/image_tag.go +++ b/daemon/images/image_tag.go @@ -39,6 +39,6 @@ func (i *ImageService) TagImageWithReference(ctx context.Context, imageID image. if err := i.imageStore.SetLastUpdated(imageID); err != nil { return err } - i.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") + i.eventsLogger.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag") return nil } diff --git a/daemon/images/service.go b/daemon/images/service.go index c23e878cf3273..92965170e8077 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -53,7 +53,7 @@ type ImageServiceConfig struct { // NewImageService returns a new ImageService from a configuration func NewImageService(config ImageServiceConfig) *ImageService { - return &ImageService{ + i := &ImageService{ containers: config.ContainerStore, distributionMetadataStore: config.DistributionMetadataStore, downloadManager: xfer.NewLayerDownloadManager(config.LayerStore, config.MaxConcurrentDownloads, xfer.WithMaxDownloadAttempts(config.MaxDownloadAttempts)), @@ -68,6 +68,11 @@ func NewImageService(config ImageServiceConfig) *ImageService { content: config.ContentStore, contentNamespace: config.ContentNamespace, } + i.eventsLogger = image.EventLogger{ + Events: i.eventsService, + GetImage: i.GetImage, + } + return i } // ImageService provides a backend for image management @@ -87,6 +92,7 @@ type ImageService struct { content content.Store contentNamespace string usage singleflight.Group + eventsLogger image.EventLogger } // DistributionServices provides daemon image storage services diff --git a/daemon/images/image_events.go b/image/events.go similarity index 50% rename from daemon/images/image_events.go rename to image/events.go index 294f3b4b3d7d7..3f531470a9ad4 100644 --- 
a/daemon/images/image_events.go +++ b/image/events.go @@ -1,18 +1,22 @@ -package images // import "github.com/docker/docker/daemon/images" +package image import ( + "context" + "github.com/docker/docker/api/types/events" imagetypes "github.com/docker/docker/api/types/image" + daemonevents "github.com/docker/docker/daemon/events" ) -// LogImageEvent generates an event related to an image with only the default attributes. -func (i *ImageService) LogImageEvent(imageID, refName, action string) { - i.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) +// EventLogger produces daemon events for image lifecycle +type EventLogger struct { + Events *daemonevents.Events + GetImage func(ctx context.Context, refOrID string, options imagetypes.GetImageOpts) (*Image, error) } -// LogImageEventWithAttributes generates an event related to an image with specific given attributes. -func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { - img, err := i.GetImage(nil, imageID, imagetypes.GetImageOpts{}) +func (e EventLogger) LogImageEvent(imageID, refName, action string) { + attributes := map[string]string{} + img, err := e.GetImage(nil, imageID, imagetypes.GetImageOpts{}) if err == nil && img.Config != nil { // image has not been removed yet. // it could be missing if the event is `delete`. @@ -26,7 +30,7 @@ func (i *ImageService) LogImageEventWithAttributes(imageID, refName, action stri Attributes: attributes, } - i.eventsService.Log(action, events.ImageEventType, actor) + e.Events.Log(action, events.ImageEventType, actor) } // copyAttributes guarantees that labels are not mutated by event triggers. 
diff --git a/image/tarexport/load.go b/image/tarexport/load.go index 1c5aac627fe4b..07790bb6cb882 100644 --- a/image/tarexport/load.go +++ b/image/tarexport/load.go @@ -134,7 +134,7 @@ func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) } parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) - l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load") + l.loggerImgEvent(imgID.String(), imgID.String(), "load") } for _, p := range validatedParentLinks(parentLinks) { diff --git a/image/tarexport/save.go b/image/tarexport/save.go index 054a02ba6b440..c43bc60d11583 100644 --- a/image/tarexport/save.go +++ b/image/tarexport/save.go @@ -224,7 +224,7 @@ func (s *saveSession) save(outStream io.Writer) error { parentID, _ := s.is.GetParent(id) parentLinks = append(parentLinks, parentLink{id, parentID}) - s.tarexporter.loggerImgEvent.LogImageEvent(id.String(), id.String(), "save") + s.tarexporter.loggerImgEvent(id.String(), id.String(), "save") } for i, p := range validatedParentLinks(parentLinks) { diff --git a/image/tarexport/tarexport.go b/image/tarexport/tarexport.go index 5bcad2265c878..9ad63879f028e 100644 --- a/image/tarexport/tarexport.go +++ b/image/tarexport/tarexport.go @@ -30,11 +30,8 @@ type tarexporter struct { loggerImgEvent LogImageEvent } -// LogImageEvent defines interface for event generation related to image tar(load and save) operations -type LogImageEvent interface { - // LogImageEvent generates an event related to an image operation - LogImageEvent(imageID, refName, action string) -} +// LogImageEvent defines function to generate events related to image tar(load and save) operations +type LogImageEvent func(imageID, refName, action string) // NewTarExporter returns new Exporter for tar packages func NewTarExporter(is image.Store, lss layer.Store, rs refstore.Store, loggerImgEvent LogImageEvent) image.Exporter { From 368e268dcdc0a481f99157d5faeb1322ef5d201a Mon Sep 17 00:00:00 2001 From: Djordje Lukic 
Date: Wed, 27 Jul 2022 14:24:34 +0200 Subject: [PATCH 18/90] Make build and buildx work Signed-off-by: Djordje Lukic --- builder/builder-next/builder.go | 69 +- builder/builder-next/control/control.go | 584 +++++++++++++ builder/builder-next/controller.go | 97 ++- .../builder-next/worker/containerdworker.go | 114 +++ cmd/dockerd/daemon.go | 48 +- vendor.mod | 2 + vendor.sum | 11 +- .../github.com/Microsoft/hcsshim/hcn/hcn.go | 328 ++++++++ .../Microsoft/hcsshim/hcn/hcnendpoint.go | 388 +++++++++ .../Microsoft/hcsshim/hcn/hcnerrors.go | 164 ++++ .../Microsoft/hcsshim/hcn/hcnglobals.go | 138 +++ .../Microsoft/hcsshim/hcn/hcnloadbalancer.go | 311 +++++++ .../Microsoft/hcsshim/hcn/hcnnamespace.go | 446 ++++++++++ .../Microsoft/hcsshim/hcn/hcnnetwork.go | 462 ++++++++++ .../Microsoft/hcsshim/hcn/hcnpolicy.go | 344 ++++++++ .../Microsoft/hcsshim/hcn/hcnroute.go | 266 ++++++ .../Microsoft/hcsshim/hcn/hcnsupport.go | 147 ++++ .../Microsoft/hcsshim/hcn/zsyscall_windows.go | 795 ++++++++++++++++++ .../hcsshim/internal/cni/registry.go | 110 +++ .../hcsshim/internal/regstate/regstate.go | 288 +++++++ .../internal/regstate/zsyscall_windows.go | 51 ++ .../hcsshim/internal/runhcs/container.go | 71 ++ .../Microsoft/hcsshim/internal/runhcs/util.go | 16 + .../Microsoft/hcsshim/internal/runhcs/vm.go | 43 + .../github.com/containerd/go-cni/.gitignore | 3 + .../containerd/go-cni/.golangci.yml | 23 + vendor/github.com/containerd/go-cni/LICENSE | 201 +++++ vendor/github.com/containerd/go-cni/Makefile | 41 + vendor/github.com/containerd/go-cni/README.md | 96 +++ vendor/github.com/containerd/go-cni/cni.go | 312 +++++++ .../containerd/go-cni/deprecated.go | 34 + vendor/github.com/containerd/go-cni/errors.go | 55 ++ vendor/github.com/containerd/go-cni/helper.go | 41 + .../github.com/containerd/go-cni/namespace.go | 81 ++ .../containerd/go-cni/namespace_opts.go | 77 ++ vendor/github.com/containerd/go-cni/opts.go | 273 ++++++ vendor/github.com/containerd/go-cni/result.go | 114 +++ 
.../github.com/containerd/go-cni/testutils.go | 78 ++ vendor/github.com/containerd/go-cni/types.go | 65 ++ .../containernetworking/cni/LICENSE | 202 +++++ .../containernetworking/cni/libcni/api.go | 679 +++++++++++++++ .../containernetworking/cni/libcni/conf.go | 270 ++++++ .../cni/pkg/invoke/args.go | 128 +++ .../cni/pkg/invoke/delegate.go | 80 ++ .../cni/pkg/invoke/exec.go | 181 ++++ .../cni/pkg/invoke/find.go | 48 ++ .../cni/pkg/invoke/os_unix.go | 20 + .../cni/pkg/invoke/os_windows.go | 18 + .../cni/pkg/invoke/raw_exec.go | 88 ++ .../cni/pkg/types/020/types.go | 189 +++++ .../cni/pkg/types/040/types.go | 306 +++++++ .../cni/pkg/types/100/types.go | 307 +++++++ .../containernetworking/cni/pkg/types/args.go | 122 +++ .../cni/pkg/types/create/create.go | 56 ++ .../cni/pkg/types/internal/convert.go | 92 ++ .../cni/pkg/types/internal/create.go | 66 ++ .../cni/pkg/types/types.go | 234 ++++++ .../cni/pkg/utils/utils.go | 84 ++ .../cni/pkg/version/conf.go | 26 + .../cni/pkg/version/plugin.go | 144 ++++ .../cni/pkg/version/reconcile.go | 49 ++ .../cni/pkg/version/version.go | 89 ++ .../executor/containerdexecutor/executor.go | 441 ++++++++++ .../exporter/containerimage/export.go | 490 +++++++++++ .../exporter/containerimage/writer.go | 572 +++++++++++++ .../moby/buildkit/exporter/oci/export.go | 365 ++++++++ .../snapshot/imagerefchecker/checker.go | 129 +++ .../buildkit/source/containerimage/pull.go | 366 ++++++++ .../util/network/cniprovider/allowempty.s | 0 .../buildkit/util/network/cniprovider/cni.go | 106 +++ .../util/network/cniprovider/cni_unsafe.go | 17 + .../network/cniprovider/createns_linux.go | 98 +++ .../util/network/cniprovider/createns_unix.go | 25 + .../network/cniprovider/createns_windows.go | 49 ++ .../util/network/netproviders/network.go | 61 ++ .../util/network/netproviders/network_unix.go | 18 + .../network/netproviders/network_windows.go | 18 + .../moby/buildkit/util/pull/pull.go | 274 ++++++ .../moby/buildkit/worker/base/worker.go | 481 +++++++++++ 
.../buildkit/worker/containerd/containerd.go | 150 ++++ vendor/modules.txt | 29 + 81 files changed, 13913 insertions(+), 41 deletions(-) create mode 100644 builder/builder-next/control/control.go create mode 100644 builder/builder-next/worker/containerdworker.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcn.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go create mode 100644 vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go create mode 100644 vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go create mode 100644 vendor/github.com/containerd/go-cni/.gitignore create mode 100644 vendor/github.com/containerd/go-cni/.golangci.yml create mode 100644 vendor/github.com/containerd/go-cni/LICENSE create mode 100644 vendor/github.com/containerd/go-cni/Makefile create mode 100644 vendor/github.com/containerd/go-cni/README.md create mode 100644 vendor/github.com/containerd/go-cni/cni.go create mode 100644 
vendor/github.com/containerd/go-cni/deprecated.go create mode 100644 vendor/github.com/containerd/go-cni/errors.go create mode 100644 vendor/github.com/containerd/go-cni/helper.go create mode 100644 vendor/github.com/containerd/go-cni/namespace.go create mode 100644 vendor/github.com/containerd/go-cni/namespace_opts.go create mode 100644 vendor/github.com/containerd/go-cni/opts.go create mode 100644 vendor/github.com/containerd/go-cni/result.go create mode 100644 vendor/github.com/containerd/go-cni/testutils.go create mode 100644 vendor/github.com/containerd/go-cni/types.go create mode 100644 vendor/github.com/containernetworking/cni/LICENSE create mode 100644 vendor/github.com/containernetworking/cni/libcni/api.go create mode 100644 vendor/github.com/containernetworking/cni/libcni/conf.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/args.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/exec.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/find.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/020/types.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/040/types.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/100/types.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/args.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/create/create.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/types/internal/create.go create mode 100644 
vendor/github.com/containernetworking/cni/pkg/types/types.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/utils/utils.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/conf.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/plugin.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/reconcile.go create mode 100644 vendor/github.com/containernetworking/cni/pkg/version/version.go create mode 100644 vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go create mode 100644 vendor/github.com/moby/buildkit/exporter/containerimage/export.go create mode 100644 vendor/github.com/moby/buildkit/exporter/containerimage/writer.go create mode 100644 vendor/github.com/moby/buildkit/exporter/oci/export.go create mode 100644 vendor/github.com/moby/buildkit/snapshot/imagerefchecker/checker.go create mode 100644 vendor/github.com/moby/buildkit/source/containerimage/pull.go create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/createns_unix.go create mode 100644 vendor/github.com/moby/buildkit/util/network/cniprovider/createns_windows.go create mode 100644 vendor/github.com/moby/buildkit/util/network/netproviders/network.go create mode 100644 vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go create mode 100644 vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go create mode 100644 vendor/github.com/moby/buildkit/util/pull/pull.go create mode 100644 vendor/github.com/moby/buildkit/worker/base/worker.go create mode 100644 
vendor/github.com/moby/buildkit/worker/containerd/containerd.go diff --git a/builder/builder-next/builder.go b/builder/builder-next/builder.go index 81688e2185fd6..631befebc4b98 100644 --- a/builder/builder-next/builder.go +++ b/builder/builder-next/builder.go @@ -12,9 +12,11 @@ import ( "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes/docker" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" + mobycontrol "github.com/docker/docker/builder/builder-next/control" "github.com/docker/docker/daemon/config" "github.com/docker/docker/daemon/images" "github.com/docker/docker/libnetwork" @@ -23,7 +25,6 @@ import ( "github.com/docker/go-units" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client" - "github.com/moby/buildkit/control" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/util/entitlements" @@ -76,11 +77,14 @@ type Opt struct { IdentityMapping idtools.IdentityMapping DNSConfig config.DNSConfig ApparmorProfile string + UseSnapshotter bool + ContainerdAddress string + ContainerdNamespace string } // Builder can build using BuildKit backend type Builder struct { - controller *control.Controller + controller *mobycontrol.Controller reqBodyHandler *reqBodyHandler mu sync.Mutex @@ -327,14 +331,24 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder. 
frontendAttrs["ulimit"] = ulimits } - exporterName := "" - exporterAttrs := map[string]string{} + reposAndTags, err := sanitizeRepoAndTags(opt.Options.Tags) + if err != nil { + return nil, err + } + var names []string + for _, tag := range reposAndTags { + names = append(names, tag.String()) + } + + exporterName := client.ExporterImage + exporterAttrs := map[string]string{ + "image.name": strings.Join(names, ","), + "name": strings.Join(names, ","), + } if len(opt.Options.Outputs) > 1 { return nil, errors.Errorf("multiple outputs not supported") - } else if len(opt.Options.Outputs) == 0 { - exporterName = "moby" - } else { + } else if len(opt.Options.Outputs) == 1 { // cacheonly is a special type for triggering skipping all exporters if opt.Options.Outputs[0].Type != "cacheonly" { exporterName = opt.Options.Outputs[0].Type @@ -342,12 +356,6 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder. } } - if exporterName == "moby" { - if len(opt.Options.Tags) > 0 { - exporterAttrs["name"] = strings.Join(opt.Options.Tags, ",") - } - } - cache := controlapi.CacheOptions{} if inlineCache := opt.Options.BuildArgs["BUILDKIT_INLINE_CACHE"]; inlineCache != nil { @@ -630,3 +638,38 @@ func toBuildkitPruneInfo(opts types.BuildCachePruneOptions) (client.PruneInfo, e Filter: []string{strings.Join(bkFilter, ",")}, }, nil } + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. 
+ uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNormalizedNamed(repo) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + ref = reference.TagNameOnly(ref) + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} diff --git a/builder/builder-next/control/control.go b/builder/builder-next/control/control.go new file mode 100644 index 0000000000000..5e259f92e41b5 --- /dev/null +++ b/builder/builder-next/control/control.go @@ -0,0 +1,584 @@ +package control + +import ( + "context" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/util/bklog" + + controlapi "github.com/moby/buildkit/api/services/control" + apitypes "github.com/moby/buildkit/api/types" + "github.com/moby/buildkit/cache/remotecache" + "github.com/moby/buildkit/client" + controlgateway "github.com/moby/buildkit/control/gateway" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/grpchijack" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/throttle" + "github.com/moby/buildkit/util/tracing/transform" + "github.com/moby/buildkit/worker" + "github.com/pkg/errors" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + tracev1 "go.opentelemetry.io/proto/otlp/collector/trace/v1" + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +var emptyLogVertexSize int + +func init() { + 
emptyLogVertex := controlapi.VertexLog{} + emptyLogVertexSize = emptyLogVertex.Size() +} + +// Opt is used to configure a Controller +type Opt struct { + SessionManager *session.Manager + WorkerController *worker.Controller + Frontends map[string]frontend.Frontend + CacheKeyStorage solver.CacheKeyStorage + ResolveCacheExporterFuncs map[string]remotecache.ResolveCacheExporterFunc + ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc + Entitlements []string + TraceCollector sdktrace.SpanExporter + UseSnapshotter bool +} + +// Controller registers itself as a GRPC build server +type Controller struct { // TODO: ControlService + // buildCount needs to be 64bit aligned + buildCount int64 + opt Opt + solver *llbsolver.Solver + cache solver.CacheManager + gatewayForwarder *controlgateway.GatewayForwarder + throttledGC func() + gcmu sync.Mutex + *tracev1.UnimplementedTraceServiceServer +} + +// NewController creates a new build controller +func NewController(opt Opt) (*Controller, error) { + cache := solver.NewCacheManager(context.TODO(), "local", opt.CacheKeyStorage, worker.NewCacheResultStorage(opt.WorkerController)) + + gatewayForwarder := controlgateway.NewGatewayForwarder() + + solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFuncs, gatewayForwarder, opt.SessionManager, opt.Entitlements) + if err != nil { + return nil, errors.Wrap(err, "failed to create solver") + } + + c := &Controller{ + opt: opt, + solver: solver, + cache: cache, + gatewayForwarder: gatewayForwarder, + } + c.throttledGC = throttle.After(time.Minute, c.gc) + + defer func() { + time.AfterFunc(time.Second, c.throttledGC) + }() + + return c, nil +} + +// Register the controller as a build GRPC server +func (c *Controller) Register(server *grpc.Server) error { + controlapi.RegisterControlServer(server, c) + c.gatewayForwarder.Register(server) + tracev1.RegisterTraceServiceServer(server, c) + return nil +} + +// DiskUsage returns the 
disk usage +func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageRequest) (*controlapi.DiskUsageResponse, error) { + resp := &controlapi.DiskUsageResponse{} + workers, err := c.opt.WorkerController.List() + if err != nil { + return nil, err + } + for _, w := range workers { + du, err := w.DiskUsage(ctx, client.DiskUsageInfo{ + Filter: r.Filter, + }) + if err != nil { + return nil, err + } + + for _, r := range du { + resp.Record = append(resp.Record, &controlapi.UsageRecord{ + // TODO: add worker info + ID: r.ID, + Mutable: r.Mutable, + InUse: r.InUse, + Size_: r.Size, + Parents: r.Parents, + UsageCount: int64(r.UsageCount), + Description: r.Description, + CreatedAt: r.CreatedAt, + LastUsedAt: r.LastUsedAt, + RecordType: string(r.RecordType), + Shared: r.Shared, + }) + } + } + return resp, nil +} + +// Prune prunes all workers +func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Control_PruneServer) error { + if atomic.LoadInt64(&c.buildCount) == 0 { + imageutil.CancelCacheLeases() + } + + ch := make(chan client.UsageInfo) + + eg, ctx := errgroup.WithContext(stream.Context()) + workers, err := c.opt.WorkerController.List() + if err != nil { + return errors.Wrap(err, "failed to list workers for prune") + } + + didPrune := false + defer func() { + if didPrune { + if c, ok := c.cache.(interface { + ReleaseUnreferenced() error + }); ok { + if err := c.ReleaseUnreferenced(); err != nil { + bklog.G(ctx).Errorf("failed to release cache metadata: %+v", err) + } + } + } + }() + + for _, w := range workers { + func(w worker.Worker) { + eg.Go(func() error { + return w.Prune(ctx, ch, client.PruneInfo{ + Filter: req.Filter, + All: req.All, + KeepDuration: time.Duration(req.KeepDuration), + KeepBytes: req.KeepBytes, + }) + }) + }(w) + } + + eg2, _ := errgroup.WithContext(stream.Context()) + + eg2.Go(func() error { + defer close(ch) + return eg.Wait() + }) + + eg2.Go(func() error { + for r := range ch { + didPrune = true + if err := 
stream.Send(&controlapi.UsageRecord{ + // TODO: add worker info + ID: r.ID, + Mutable: r.Mutable, + InUse: r.InUse, + Size_: r.Size, + Parents: r.Parents, + UsageCount: int64(r.UsageCount), + Description: r.Description, + CreatedAt: r.CreatedAt, + LastUsedAt: r.LastUsedAt, + RecordType: string(r.RecordType), + Shared: r.Shared, + }); err != nil { + return err + } + } + return nil + }) + + return eg2.Wait() +} + +// Export ... +func (c *Controller) Export(ctx context.Context, req *tracev1.ExportTraceServiceRequest) (*tracev1.ExportTraceServiceResponse, error) { + if c.opt.TraceCollector == nil { + return nil, status.Errorf(codes.Unavailable, "trace collector not configured") + } + err := c.opt.TraceCollector.ExportSpans(ctx, transform.Spans(req.GetResourceSpans())) + if err != nil { + return nil, err + } + return &tracev1.ExportTraceServiceResponse{}, nil +} + +func translateLegacySolveRequest(req *controlapi.SolveRequest) error { + // translates ExportRef and ExportAttrs to new Exports (v0.4.0) + if legacyExportRef := req.Cache.ExportRefDeprecated; legacyExportRef != "" { + ex := &controlapi.CacheOptionsEntry{ + Type: "registry", + Attrs: req.Cache.ExportAttrsDeprecated, + } + if ex.Attrs == nil { + ex.Attrs = make(map[string]string) + } + ex.Attrs["ref"] = legacyExportRef + // FIXME(AkihiroSuda): skip append if already exists + req.Cache.Exports = append(req.Cache.Exports, ex) + req.Cache.ExportRefDeprecated = "" + req.Cache.ExportAttrsDeprecated = nil + } + // translates ImportRefs to new Imports (v0.4.0) + for _, legacyImportRef := range req.Cache.ImportRefsDeprecated { + im := &controlapi.CacheOptionsEntry{ + Type: "registry", + Attrs: map[string]string{"ref": legacyImportRef}, + } + // FIXME(AkihiroSuda): skip append if already exists + req.Cache.Imports = append(req.Cache.Imports, im) + } + req.Cache.ImportRefsDeprecated = nil + return nil +} + +// Solve solves a build request +func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) 
(*controlapi.SolveResponse, error) { + atomic.AddInt64(&c.buildCount, 1) + defer atomic.AddInt64(&c.buildCount, -1) + + // This method registers job ID in solver.Solve. Make sure there are no blocking calls before that might delay this. + + if err := translateLegacySolveRequest(req); err != nil { + return nil, err + } + + defer func() { + time.AfterFunc(time.Second, c.throttledGC) + }() + + var expi exporter.ExporterInstance + // TODO: multiworker + // This is actually tricky, as the exporter should come from the worker that has the returned reference. We may need to delay this so that the solver loads this. + w, err := c.opt.WorkerController.GetDefault() + if err != nil { + return nil, err + } + if req.Exporter != "" { + var exp exporter.Exporter + if c.opt.UseSnapshotter { + if req.Exporter == "moby" { + req.Exporter = client.ExporterImage + } + + exp, err = w.Exporter(req.Exporter, c.opt.SessionManager) + if err != nil { + return nil, err + } + + if req.ExporterAttrs != nil { + reposAndTags, err := sanitizeRepoAndTags(strings.Split(req.ExporterAttrs["name"], ",")) + if err != nil { + return nil, err + } + var names []string + for _, tag := range reposAndTags { + names = append(names, tag.String()) + } + + req.ExporterAttrs["name"] = strings.Join(names, ",") + req.ExporterAttrs["unpack"] = "true" + } + } else { + exp, err = w.Exporter(req.Exporter, c.opt.SessionManager) + if err != nil { + return nil, err + } + } + expi, err = exp.Resolve(ctx, req.ExporterAttrs) + if err != nil { + return nil, err + } + } + + var ( + cacheExporter remotecache.Exporter + cacheExportMode solver.CacheExportMode + cacheImports []frontend.CacheOptionsEntry + ) + if len(req.Cache.Exports) > 1 { + // TODO(AkihiroSuda): this should be fairly easy + return nil, errors.New("specifying multiple cache exports is not supported currently") + } + + if len(req.Cache.Exports) == 1 { + e := req.Cache.Exports[0] + cacheExporterFunc, ok := c.opt.ResolveCacheExporterFuncs[e.Type] + if !ok { + return 
nil, errors.Errorf("unknown cache exporter: %q", e.Type) + } + cacheExporter, err = cacheExporterFunc(ctx, session.NewGroup(req.Session), e.Attrs) + if err != nil { + return nil, err + } + if exportMode, supported := parseCacheExportMode(e.Attrs["mode"]); !supported { + bklog.G(ctx).Debugf("skipping invalid cache export mode: %s", e.Attrs["mode"]) + } else { + cacheExportMode = exportMode + } + } + for _, im := range req.Cache.Imports { + cacheImports = append(cacheImports, frontend.CacheOptionsEntry{ + Type: im.Type, + Attrs: im.Attrs, + }) + } + + resp, err := c.solver.Solve(ctx, req.Ref, req.Session, frontend.SolveRequest{ + Frontend: req.Frontend, + Definition: req.Definition, + FrontendOpt: req.FrontendAttrs, + FrontendInputs: req.FrontendInputs, + CacheImports: cacheImports, + }, llbsolver.ExporterRequest{ + Exporter: expi, + CacheExporter: cacheExporter, + CacheExportMode: cacheExportMode, + }, req.Entitlements) + if err != nil { + return nil, err + } + return &controlapi.SolveResponse{ + ExporterResponse: resp.ExporterResponse, + }, nil +} + +// Status streams the statuses of a solve request +func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Control_StatusServer) error { + ch := make(chan *client.SolveStatus, 8) + + eg, ctx := errgroup.WithContext(stream.Context()) + eg.Go(func() error { + return c.solver.Status(ctx, req.Ref, ch) + }) + + eg.Go(func() error { + for { + ss, ok := <-ch + if !ok { + return nil + } + logSize := 0 + for { + retry := false + sr := controlapi.StatusResponse{} + for _, v := range ss.Vertexes { + sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{ + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + ProgressGroup: v.ProgressGroup, + }) + } + for _, v := range ss.Statuses { + sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{ + ID: v.ID, + Vertex: v.Vertex, + Name: v.Name, + Current: v.Current, + Total: 
v.Total, + Timestamp: v.Timestamp, + Started: v.Started, + Completed: v.Completed, + }) + } + for i, v := range ss.Logs { + sr.Logs = append(sr.Logs, &controlapi.VertexLog{ + Vertex: v.Vertex, + Stream: int64(v.Stream), + Msg: v.Data, + Timestamp: v.Timestamp, + }) + logSize += len(v.Data) + emptyLogVertexSize + // avoid logs growing big and split apart if they do + if logSize > 1024*1024 { + ss.Vertexes = nil + ss.Statuses = nil + ss.Logs = ss.Logs[i+1:] + retry = true + break + } + } + for _, v := range ss.Warnings { + sr.Warnings = append(sr.Warnings, &controlapi.VertexWarning{ + Vertex: v.Vertex, + Level: int64(v.Level), + Short: v.Short, + Detail: v.Detail, + Info: v.SourceInfo, + Ranges: v.Range, + Url: v.URL, + }) + } + if err := stream.SendMsg(&sr); err != nil { + return err + } + if !retry { + break + } + } + } + }) + + return eg.Wait() +} + +// Session ... +func (c *Controller) Session(stream controlapi.Control_SessionServer) error { + bklog.G(stream.Context()).Debugf("session started") + + conn, closeCh, opts := grpchijack.Hijack(stream) + defer conn.Close() + + ctx, cancel := context.WithCancel(stream.Context()) + go func() { + <-closeCh + cancel() + }() + + err := c.opt.SessionManager.HandleConn(ctx, conn, opts) + bklog.G(ctx).Debugf("session finished: %v", err) + return err +} + +// ListWorkers returns all the available workers +func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersRequest) (*controlapi.ListWorkersResponse, error) { + resp := &controlapi.ListWorkersResponse{} + workers, err := c.opt.WorkerController.List(r.Filter...) 
+ if err != nil { + return nil, err + } + for _, w := range workers { + resp.Record = append(resp.Record, &apitypes.WorkerRecord{ + ID: w.ID(), + Labels: w.Labels(), + Platforms: pb.PlatformsFromSpec(w.Platforms(true)), + GCPolicy: toPBGCPolicy(w.GCPolicy()), + }) + } + return resp, nil +} + +func (c *Controller) gc() { + c.gcmu.Lock() + defer c.gcmu.Unlock() + + workers, err := c.opt.WorkerController.List() + if err != nil { + return + } + + eg, ctx := errgroup.WithContext(context.TODO()) + + var size int64 + ch := make(chan client.UsageInfo) + done := make(chan struct{}) + go func() { + for ui := range ch { + size += ui.Size + } + close(done) + }() + + for _, w := range workers { + func(w worker.Worker) { + eg.Go(func() error { + if policy := w.GCPolicy(); len(policy) > 0 { + return w.Prune(ctx, ch, policy...) + } + return nil + }) + }(w) + } + + err = eg.Wait() + close(ch) + if err != nil { + bklog.G(ctx).Errorf("gc error: %+v", err) + } + <-done + if size > 0 { + bklog.G(ctx).Debugf("gc cleaned up %d bytes", size) + } +} + +func parseCacheExportMode(mode string) (solver.CacheExportMode, bool) { + switch mode { + case "min": + return solver.CacheExportModeMin, true + case "max": + return solver.CacheExportModeMax, true + } + return solver.CacheExportModeMin, false +} + +func toPBGCPolicy(in []client.PruneInfo) []*apitypes.GCPolicy { + policy := make([]*apitypes.GCPolicy, 0, len(in)) + for _, p := range in { + policy = append(policy, &apitypes.GCPolicy{ + All: p.All, + KeepBytes: p.KeepBytes, + KeepDuration: int64(p.KeepDuration), + Filters: p.Filter, + }) + } + return policy +} + +// sanitizeRepoAndTags parses the raw "t" parameter received from the client +// to a slice of repoAndTag. +// It also validates each repoName and tag. +func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { + var ( + repoAndTags []reference.Named + // This map is used for deduplicating the "-t" parameter. 
+ uniqNames = make(map[string]struct{}) + ) + for _, repo := range names { + if repo == "" { + continue + } + + ref, err := reference.ParseNormalizedNamed(repo) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("build tag cannot contain a digest") + } + + ref = reference.TagNameOnly(ref) + + nameWithTag := ref.String() + + if _, exists := uniqNames[nameWithTag]; !exists { + uniqNames[nameWithTag] = struct{}{} + repoAndTags = append(repoAndTags, ref) + } + } + return repoAndTags, nil +} diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go index f546c8f98f5d4..108adc9a32993 100644 --- a/builder/builder-next/controller.go +++ b/builder/builder-next/controller.go @@ -5,7 +5,9 @@ import ( "net/http" "os" "path/filepath" + "time" + ctd "github.com/containerd/containerd" "github.com/containerd/containerd/content/local" ctdmetadata "github.com/containerd/containerd/metadata" "github.com/containerd/containerd/snapshots" @@ -14,6 +16,7 @@ import ( "github.com/docker/docker/builder/builder-next/adapters/containerimage" "github.com/docker/docker/builder/builder-next/adapters/localinlinecache" "github.com/docker/docker/builder/builder-next/adapters/snapshot" + mobycontrol "github.com/docker/docker/builder/builder-next/control" containerimageexp "github.com/docker/docker/builder/builder-next/exporter" "github.com/docker/docker/builder/builder-next/imagerefchecker" mobyworker "github.com/docker/docker/builder/builder-next/worker" @@ -26,7 +29,6 @@ import ( inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline" localremotecache "github.com/moby/buildkit/cache/remotecache/local" "github.com/moby/buildkit/client" - "github.com/moby/buildkit/control" "github.com/moby/buildkit/frontend" dockerfile "github.com/moby/buildkit/frontend/dockerfile/builder" "github.com/moby/buildkit/frontend/gateway" @@ -36,12 +38,98 @@ import ( 
"github.com/moby/buildkit/util/archutil" "github.com/moby/buildkit/util/entitlements" "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/network/cniprovider" + "github.com/moby/buildkit/util/network/netproviders" "github.com/moby/buildkit/worker" + "github.com/moby/buildkit/worker/containerd" "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) -func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) { +func newController(rt http.RoundTripper, opt Opt) (*mobycontrol.Controller, error) { + if opt.UseSnapshotter { + return newSnapshotterController(rt, opt) + } + return newGrapDriverController(rt, opt) +} + +func newSnapshotterController(rt http.RoundTripper, opt Opt) (*mobycontrol.Controller, error) { + if err := os.MkdirAll(opt.Root, 0711); err != nil { + return nil, err + } + + dist := opt.Dist + + cacheStorage, err := bboltcachestorage.NewStore(filepath.Join(opt.Root, "cache.db")) + if err != nil { + return nil, err + } + + nc := netproviders.Opt{ + Mode: "auto", + CNI: cniprovider.Opt{ + Root: opt.Root, + ConfigPath: "/etc/buildkit/cni.json", + BinaryDir: "/opt/cni/bin", + }, + } + dns := getDNSConfig(opt.DNSConfig) + + snapshotter := ctd.DefaultSnapshotter + + wo, err := containerd.NewWorkerOpt(opt.Root, opt.ContainerdAddress, snapshotter, opt.ContainerdNamespace, + opt.Rootless, map[string]string{}, dns, nc, opt.ApparmorProfile, nil, "", ctd.WithTimeout(60*time.Second)) + if err != nil { + return nil, err + } + + policy, err := getGCPolicy(opt.BuilderConfig, opt.Root) + if err != nil { + return nil, err + } + + wo.GCPolicy = policy + wo.RegistryHosts = opt.RegistryHosts + + w, err := mobyworker.NewContainerdWorker(context.TODO(), wo) + if err != nil { + return nil, err + } + + wc := &worker.Controller{} + + err = wc.Add(w) + if err != nil { + return nil, err + } + frontends := map[string]frontend.Frontend{ + "dockerfile.v0": forwarder.NewGatewayForwarder(wc, dockerfile.Build), + "gateway.v0": 
gateway.NewGatewayFrontend(wc), + } + + wa, err := wc.GetDefault() + if err != nil { + return nil, err + } + + return mobycontrol.NewController(mobycontrol.Opt{ + SessionManager: opt.SessionManager, + WorkerController: wc, + Frontends: frontends, + CacheKeyStorage: cacheStorage, + ResolveCacheImporterFuncs: map[string]remotecache.ResolveCacheImporterFunc{ + "registry": localinlinecache.ResolveCacheImporterFunc(opt.SessionManager, opt.RegistryHosts, wa.ContentStore(), dist.ReferenceStore, dist.ImageStore), + "local": localremotecache.ResolveCacheImporterFunc(opt.SessionManager), + }, + ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{ + "inline": inlineremotecache.ResolveCacheExporterFunc(), + }, + Entitlements: getEntitlements(opt.BuilderConfig), + UseSnapshotter: true, + }) +} + +func newGrapDriverController(rt http.RoundTripper, opt Opt) (*mobycontrol.Controller, error) { if err := os.MkdirAll(opt.Root, 0711); err != nil { return nil, err } @@ -203,7 +291,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) { "gateway.v0": gateway.NewGatewayFrontend(wc), } - return control.NewController(control.Opt{ + return mobycontrol.NewController(mobycontrol.Opt{ SessionManager: opt.SessionManager, WorkerController: wc, Frontends: frontends, @@ -215,7 +303,8 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) { ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{ "inline": inlineremotecache.ResolveCacheExporterFunc(), }, - Entitlements: getEntitlements(opt.BuilderConfig), + Entitlements: getEntitlements(opt.BuilderConfig), + UseSnapshotter: false, }) } diff --git a/builder/builder-next/worker/containerdworker.go b/builder/builder-next/worker/containerdworker.go new file mode 100644 index 0000000000000..928bb1e6380a1 --- /dev/null +++ b/builder/builder-next/worker/containerdworker.go @@ -0,0 +1,114 @@ +package worker + +import ( + "context" + + 
"github.com/containerd/containerd/content" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/worker/base" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ContainerdWorker is a local worker instance with dedicated snapshotter, cache, and so on. +type ContainerdWorker struct { + baseWorker *base.Worker +} + +// NewContainerdWorker instantiates a local worker +func NewContainerdWorker(ctx context.Context, wo base.WorkerOpt) (*ContainerdWorker, error) { + bw, err := base.NewWorker(ctx, wo) + if err != nil { + return nil, err + } + return &ContainerdWorker{ + baseWorker: bw, + }, nil +} + +// ID returns worker ID +func (w *ContainerdWorker) ID() string { + return w.baseWorker.ID() +} + +// Labels returns map of all worker labels +func (w *ContainerdWorker) Labels() map[string]string { + return w.baseWorker.Labels() +} + +// Platforms returns one or more platforms supported by the image. 
+func (w *ContainerdWorker) Platforms(noCache bool) []ocispec.Platform { + return w.baseWorker.Platforms(noCache) +} + +// GCPolicy returns automatic GC Policy +func (w *ContainerdWorker) GCPolicy() []client.PruneInfo { + return w.baseWorker.GCPolicy() +} + +// ContentStore returns content store +func (w *ContainerdWorker) ContentStore() content.Store { + return w.baseWorker.ContentStore() +} + +// LoadRef loads a reference by ID +func (w *ContainerdWorker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) { + return w.baseWorker.LoadRef(ctx, id, hidden) +} + +// ResolveOp converts a LLB vertex into a LLB operation +func (w *ContainerdWorker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) { + return w.baseWorker.ResolveOp(v, s, sm) +} + +// ResolveImageConfig returns image config for an image +func (w *ContainerdWorker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) { + return w.baseWorker.ResolveImageConfig(ctx, ref, opt, sm, g) +} + +// DiskUsage returns disk usage report +func (w *ContainerdWorker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) { + return w.baseWorker.DiskUsage(ctx, opt) +} + +// Prune deletes reclaimable build cache +func (w *ContainerdWorker) Prune(ctx context.Context, ch chan client.UsageInfo, info ...client.PruneInfo) error { + return w.baseWorker.Prune(ctx, ch, info...) 
+} + +// Exporter returns exporter by name +func (w *ContainerdWorker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) { + switch name { + case "moby": + return w.baseWorker.Exporter(client.ExporterImage, sm) + default: + return w.baseWorker.Exporter(name, sm) + } +} + +// PruneCacheMounts removes the current cache snapshots for specified IDs +func (w *ContainerdWorker) PruneCacheMounts(ctx context.Context, ids []string) error { + return w.baseWorker.PruneCacheMounts(ctx, ids) +} + +// FromRemote converts a remote snapshot reference to a local one +func (w *ContainerdWorker) FromRemote(ctx context.Context, remote *solver.Remote) (cache.ImmutableRef, error) { + return w.baseWorker.FromRemote(ctx, remote) +} + +// Executor returns executor.Executor for running processes +func (w *ContainerdWorker) Executor() executor.Executor { + return w.baseWorker.Executor() +} + +// CacheManager returns cache.Manager for accessing local storage +func (w *ContainerdWorker) CacheManager() cache.Manager { + return w.baseWorker.CacheManager() +} diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index c1d739891f3ff..6c173f261246e 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -293,33 +293,35 @@ func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, e features: d.Features(), daemon: d, } - if !d.UsesSnapshotter() { - bk, err := buildkit.New(buildkit.Opt{ - SessionManager: sm, - Root: filepath.Join(config.Root, "buildkit"), - Dist: d.DistributionServices(), - NetworkController: d.NetworkController(), - DefaultCgroupParent: cgroupParent, - RegistryHosts: d.RegistryHosts(), - BuilderConfig: config.Builder, - Rootless: d.Rootless(), - IdentityMapping: d.IdentityMapping(), - DNSConfig: config.DNSConfig, - ApparmorProfile: daemon.DefaultApparmorProfile(), - }) - if err != nil { - return opts, err - } - bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) - if err != nil { - return opts, 
errors.Wrap(err, "failed to create buildmanager") - } + bk, err := buildkit.New(buildkit.Opt{ + SessionManager: sm, + Root: filepath.Join(config.Root, "buildkit"), + Dist: d.DistributionServices(), + NetworkController: d.NetworkController(), + DefaultCgroupParent: cgroupParent, + RegistryHosts: d.RegistryHosts(), + BuilderConfig: config.Builder, + Rootless: d.Rootless(), + IdentityMapping: d.IdentityMapping(), + DNSConfig: config.DNSConfig, + ApparmorProfile: daemon.DefaultApparmorProfile(), + UseSnapshotter: d.UsesSnapshotter(), + ContainerdAddress: config.ContainerdAddr, + ContainerdNamespace: config.ContainerdNamespace, + }) + if err != nil { + return opts, err + } - ro.buildBackend = bb - ro.buildkit = bk + bb, err := buildbackend.NewBackend(d.ImageService(), manager, bk, d.EventsService) + if err != nil { + return opts, errors.Wrap(err, "failed to create buildmanager") } + ro.buildBackend = bb + ro.buildkit = bk + return ro, nil } diff --git a/vendor.mod b/vendor.mod index 6b744eca13bfc..c873bfee0050b 100644 --- a/vendor.mod +++ b/vendor.mod @@ -96,10 +96,12 @@ require ( github.com/cilium/ebpf v0.7.0 // indirect github.com/container-storage-interface/spec v1.5.0 // indirect github.com/containerd/console v1.0.3 // indirect + github.com/containerd/go-cni v1.1.6 // indirect github.com/containerd/go-runc v1.0.0 // indirect github.com/containerd/stargz-snapshotter v0.11.3 // indirect github.com/containerd/stargz-snapshotter/estargz v0.11.3 // indirect github.com/containerd/ttrpc v1.1.0 // indirect + github.com/containernetworking/cni v1.1.1 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect diff --git a/vendor.sum b/vendor.sum index 29564772469af..a0e38ba6ba9ca 100644 --- a/vendor.sum +++ b/vendor.sum @@ -264,6 +264,8 @@ github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZH github.com/containerd/go-cni v1.0.2/go.mod 
h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA= +github.com/containerd/go-cni v1.1.6 h1:el5WPymG5nRRLQF1EfB97FWob4Tdc8INg8RZMaXWZlo= +github.com/containerd/go-cni v1.1.6/go.mod h1:BWtoWl5ghVymxu6MBjg79W9NZrCRyHIdUtk4cauMe34= github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= @@ -304,6 +306,8 @@ github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y= +github.com/containernetworking/cni v1.1.1 h1:ky20T7c0MvKvbMOwS/FrlbNwjEoqJEUUYfsL4b0mc4k= +github.com/containernetworking/cni v1.1.1/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE= @@ -561,6 +565,7 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -815,6 +820,8 @@ github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1ls github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= @@ -824,8 +831,9 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= -github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= +github.com/onsi/gomega v1.17.0 
h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -982,6 +990,7 @@ github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go new file mode 100644 index 0000000000000..df3a59a78cb30 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcn.go @@ -0,0 +1,328 @@ +// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server +// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). 
+package hcn + +import ( + "encoding/json" + "fmt" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//go:generate go run ../mksyscall_windows.go -output zsyscall_windows.go hcn.go + +/// HNS V1 API + +//sys SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) = iphlpapi.SetCurrentThreadCompartmentId +//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? + +/// HCN V2 API + +// Network +//sys hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNetworks? +//sys hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnCreateNetwork? +//sys hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) = computenetwork.HcnOpenNetwork? +//sys hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNetwork? +//sys hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNetworkProperties? +//sys hcnDeleteNetwork(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNetwork? +//sys hcnCloseNetwork(network hcnNetwork) (hr error) = computenetwork.HcnCloseNetwork? + +// Endpoint +//sys hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateEndpoints? +//sys hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnCreateEndpoint? +//sys hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) = computenetwork.HcnOpenEndpoint? +//sys hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) = computenetwork.HcnModifyEndpoint? 
+//sys hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryEndpointProperties? +//sys hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteEndpoint? +//sys hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) = computenetwork.HcnCloseEndpoint? + +// Namespace +//sys hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateNamespaces? +//sys hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnCreateNamespace? +//sys hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr error) = computenetwork.HcnOpenNamespace? +//sys hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) = computenetwork.HcnModifyNamespace? +//sys hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryNamespaceProperties? +//sys hcnDeleteNamespace(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteNamespace? +//sys hcnCloseNamespace(namespace hcnNamespace) (hr error) = computenetwork.HcnCloseNamespace? + +// LoadBalancer +//sys hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateLoadBalancers? +//sys hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnCreateLoadBalancer? +//sys hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) = computenetwork.HcnOpenLoadBalancer? +//sys hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) = computenetwork.HcnModifyLoadBalancer? 
+//sys hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQueryLoadBalancerProperties? +//sys hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteLoadBalancer? +//sys hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) = computenetwork.HcnCloseLoadBalancer? + +// SDN Routes +//sys hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) = computenetwork.HcnEnumerateSdnRoutes? +//sys hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnCreateSdnRoute? +//sys hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) = computenetwork.HcnOpenSdnRoute? +//sys hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) = computenetwork.HcnModifySdnRoute? +//sys hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) = computenetwork.HcnQuerySdnRouteProperties? +//sys hcnDeleteRoute(id *_guid, result **uint16) (hr error) = computenetwork.HcnDeleteSdnRoute? +//sys hcnCloseRoute(route hcnRoute) (hr error) = computenetwork.HcnCloseSdnRoute? + +type _guid = guid.GUID + +type hcnNetwork syscall.Handle +type hcnEndpoint syscall.Handle +type hcnNamespace syscall.Handle +type hcnLoadBalancer syscall.Handle +type hcnRoute syscall.Handle + +// SchemaVersion for HCN Objects/Queries. +type SchemaVersion = Version // hcnglobals.go + +// HostComputeQueryFlags are passed in to a HostComputeQuery to determine which +// properties of an object are returned. +type HostComputeQueryFlags uint32 + +var ( + // HostComputeQueryFlagsNone returns an object with the standard properties. + HostComputeQueryFlagsNone HostComputeQueryFlags + // HostComputeQueryFlagsDetailed returns an object with all properties. 
+ HostComputeQueryFlagsDetailed HostComputeQueryFlags = 1 +) + +// HostComputeQuery is the format for HCN queries. +type HostComputeQuery struct { + SchemaVersion SchemaVersion `json:""` + Flags HostComputeQueryFlags `json:",omitempty"` + Filter string `json:",omitempty"` +} + +type ExtraParams struct { + Resources json.RawMessage `json:",omitempty"` + SharedContainers json.RawMessage `json:",omitempty"` + LayeredOn string `json:",omitempty"` + SwitchGuid string `json:",omitempty"` + UtilityVM string `json:",omitempty"` + VirtualMachine string `json:",omitempty"` +} + +type Health struct { + Data interface{} `json:",omitempty"` + Extra ExtraParams `json:",omitempty"` +} + +// defaultQuery generates HCN Query. +// Passed into get/enumerate calls to filter results. +func defaultQuery() HostComputeQuery { + query := HostComputeQuery{ + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + Flags: HostComputeQueryFlagsNone, + } + return query +} + +// PlatformDoesNotSupportError happens when users are attempting to use a newer shim on an older OS +func platformDoesNotSupportError(featureName string) error { + return fmt.Errorf("platform does not support feature %s", featureName) +} + +// V2ApiSupported returns an error if the HCN version does not support the V2 Apis. +func V2ApiSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.Api.V2 { + return nil + } + return platformDoesNotSupportError("V2 Api/Schema") +} + +func V2SchemaVersion() SchemaVersion { + return SchemaVersion{ + Major: 2, + Minor: 0, + } +} + +// RemoteSubnetSupported returns an error if the HCN version does not support Remote Subnet policies. 
+func RemoteSubnetSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.RemoteSubnet { + return nil + } + return platformDoesNotSupportError("Remote Subnet") +} + +// HostRouteSupported returns an error if the HCN version does not support Host Route policies. +func HostRouteSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.HostRoute { + return nil + } + return platformDoesNotSupportError("Host Route") +} + +// DSRSupported returns an error if the HCN version does not support Direct Server Return. +func DSRSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.DSR { + return nil + } + return platformDoesNotSupportError("Direct Server Return (DSR)") +} + +// Slash32EndpointPrefixesSupported returns an error if the HCN version does not support configuring endpoints with /32 prefixes. +func Slash32EndpointPrefixesSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.Slash32EndpointPrefixes { + return nil + } + return platformDoesNotSupportError("Slash 32 Endpoint prefixes") +} + +// AclSupportForProtocol252Supported returns an error if the HCN version does not support HNS ACL Policies to support protocol 252 for VXLAN. +func AclSupportForProtocol252Supported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.AclSupportForProtocol252 { + return nil + } + return platformDoesNotSupportError("HNS ACL Policies to support protocol 252 for VXLAN") +} + +// SessionAffinitySupported returns an error if the HCN version does not support Session Affinity. 
+func SessionAffinitySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.SessionAffinity { + return nil + } + return platformDoesNotSupportError("Session Affinity") +} + +// IPv6DualStackSupported returns an error if the HCN version does not support IPv6DualStack. +func IPv6DualStackSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.IPv6DualStack { + return nil + } + return platformDoesNotSupportError("IPv6 DualStack") +} + +//L4proxySupported returns an error if the HCN verison does not support L4Proxy +func L4proxyPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.L4Proxy { + return nil + } + return platformDoesNotSupportError("L4ProxyPolicy") +} + +// L4WfpProxySupported returns an error if the HCN verison does not support L4WfpProxy +func L4WfpProxyPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.L4WfpProxy { + return nil + } + return platformDoesNotSupportError("L4WfpProxyPolicy") +} + +// SetPolicySupported returns an error if the HCN version does not support SetPolicy. +func SetPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.SetPolicy { + return nil + } + return platformDoesNotSupportError("SetPolicy") +} + +// VxlanPortSupported returns an error if the HCN version does not support configuring the VXLAN TCP port. +func VxlanPortSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.VxlanPort { + return nil + } + return platformDoesNotSupportError("VXLAN port configuration") +} + +// TierAclPolicySupported returns an error if the HCN version does not support configuring the TierAcl. 
+func TierAclPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.TierAcl { + return nil + } + return platformDoesNotSupportError("TierAcl") +} + +// NetworkACLPolicySupported returns an error if the HCN version does not support NetworkACLPolicy +func NetworkACLPolicySupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.NetworkACL { + return nil + } + return platformDoesNotSupportError("NetworkACL") +} + +// NestedIpSetSupported returns an error if the HCN version does not support NestedIpSet +func NestedIpSetSupported() error { + supported, err := GetCachedSupportedFeatures() + if err != nil { + return err + } + if supported.NestedIpSet { + return nil + } + return platformDoesNotSupportError("NestedIpSet") +} + +// RequestType are the different operations performed to settings. +// Used to update the settings of Endpoint/Namespace objects. +type RequestType string + +var ( + // RequestTypeAdd adds the provided settings object. + RequestTypeAdd RequestType = "Add" + // RequestTypeRemove removes the provided settings object. + RequestTypeRemove RequestType = "Remove" + // RequestTypeUpdate replaces settings with the ones provided. + RequestTypeUpdate RequestType = "Update" + // RequestTypeRefresh refreshes the settings provided. 
+ RequestTypeRefresh RequestType = "Refresh" +) diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go new file mode 100644 index 0000000000000..545e8639d6cfb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnendpoint.go @@ -0,0 +1,388 @@ +package hcn + +import ( + "encoding/json" + "errors" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// IpConfig is assoicated with an endpoint +type IpConfig struct { + IpAddress string `json:",omitempty"` + PrefixLength uint8 `json:",omitempty"` +} + +// EndpointFlags are special settings on an endpoint. +type EndpointFlags uint32 + +var ( + // EndpointFlagsNone is the default. + EndpointFlagsNone EndpointFlags + // EndpointFlagsRemoteEndpoint means that an endpoint is on another host. + EndpointFlagsRemoteEndpoint EndpointFlags = 1 +) + +// HostComputeEndpoint represents a network endpoint +type HostComputeEndpoint struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + HostComputeNetwork string `json:",omitempty"` // GUID + HostComputeNamespace string `json:",omitempty"` // GUID + Policies []EndpointPolicy `json:",omitempty"` + IpConfigurations []IpConfig `json:",omitempty"` + Dns Dns `json:",omitempty"` + Routes []Route `json:",omitempty"` + MacAddress string `json:",omitempty"` + Flags EndpointFlags `json:",omitempty"` + Health Health `json:",omitempty"` + SchemaVersion SchemaVersion `json:",omitempty"` +} + +// EndpointResourceType are the two different Endpoint settings resources. +type EndpointResourceType string + +var ( + // EndpointResourceTypePolicy is for Endpoint Policies. Ex: ACL, NAT + EndpointResourceTypePolicy EndpointResourceType = "Policy" + // EndpointResourceTypePort is for Endpoint Port settings. 
+ EndpointResourceTypePort EndpointResourceType = "Port" +) + +// ModifyEndpointSettingRequest is the structure used to send request to modify an endpoint. +// Used to update policy/port on an endpoint. +type ModifyEndpointSettingRequest struct { + ResourceType EndpointResourceType `json:",omitempty"` // Policy, Port + RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh + Settings json.RawMessage `json:",omitempty"` +} + +// VmEndpointRequest creates a switch port with identifier `PortId`. +type VmEndpointRequest struct { + PortId guid.GUID `json:",omitempty"` + VirtualNicName string `json:",omitempty"` + VirtualMachineId guid.GUID `json:",omitempty"` +} + +type PolicyEndpointRequest struct { + Policies []EndpointPolicy `json:",omitempty"` +} + +func getEndpoint(endpointGuid guid.GUID, query string) (*HostComputeEndpoint, error) { + // Open endpoint. + var ( + endpointHandle hcnEndpoint + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) + if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { + return nil, err + } + // Query endpoint. + hr = hcnQueryEndpointProperties(endpointHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close endpoint. 
+ hr = hcnCloseEndpoint(endpointHandle) + if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeEndpoint + var outputEndpoint HostComputeEndpoint + if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { + return nil, err + } + return &outputEndpoint, nil +} + +func enumerateEndpoints(query string) ([]HostComputeEndpoint, error) { + // Enumerate all Endpoint Guids + var ( + resultBuffer *uint16 + endpointBuffer *uint16 + ) + hr := hcnEnumerateEndpoints(query, &endpointBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateEndpoints", hr, resultBuffer); err != nil { + return nil, err + } + + endpoints := interop.ConvertAndFreeCoTaskMemString(endpointBuffer) + var endpointIds []guid.GUID + err := json.Unmarshal([]byte(endpoints), &endpointIds) + if err != nil { + return nil, err + } + + var outputEndpoints []HostComputeEndpoint + for _, endpointGuid := range endpointIds { + endpoint, err := getEndpoint(endpointGuid, query) + if err != nil { + return nil, err + } + outputEndpoints = append(outputEndpoints, *endpoint) + } + return outputEndpoints, nil +} + +func createEndpoint(networkId string, endpointSettings string) (*HostComputeEndpoint, error) { + networkGuid, err := guid.FromString(networkId) + if err != nil { + return nil, errInvalidNetworkID + } + // Open network. + var networkHandle hcnNetwork + var resultBuffer *uint16 + hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Create endpoint. + endpointId := guid.GUID{} + var endpointHandle hcnEndpoint + hr = hcnCreateEndpoint(networkHandle, &endpointId, endpointSettings, &endpointHandle, &resultBuffer) + if err := checkForErrors("hcnCreateEndpoint", hr, resultBuffer); err != nil { + return nil, err + } + // Query endpoint. 
+ hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + var propertiesBuffer *uint16 + hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close endpoint. + hr = hcnCloseEndpoint(endpointHandle) + if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { + return nil, err + } + // Close network. + hr = hcnCloseNetwork(networkHandle) + if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeEndpoint + var outputEndpoint HostComputeEndpoint + if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { + return nil, err + } + return &outputEndpoint, nil +} + +func modifyEndpoint(endpointId string, settings string) (*HostComputeEndpoint, error) { + endpointGuid, err := guid.FromString(endpointId) + if err != nil { + return nil, errInvalidEndpointID + } + // Open endpoint + var ( + endpointHandle hcnEndpoint + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenEndpoint(&endpointGuid, &endpointHandle, &resultBuffer) + if err := checkForErrors("hcnOpenEndpoint", hr, resultBuffer); err != nil { + return nil, err + } + // Modify endpoint + hr = hcnModifyEndpoint(endpointHandle, settings, &resultBuffer) + if err := checkForErrors("hcnModifyEndpoint", hr, resultBuffer); err != nil { + return nil, err + } + // Query endpoint. 
+ hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryEndpointProperties(endpointHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryEndpointProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close endpoint. + hr = hcnCloseEndpoint(endpointHandle) + if err := checkForErrors("hcnCloseEndpoint", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeEndpoint + var outputEndpoint HostComputeEndpoint + if err := json.Unmarshal([]byte(properties), &outputEndpoint); err != nil { + return nil, err + } + return &outputEndpoint, nil +} + +func deleteEndpoint(endpointId string) error { + endpointGuid, err := guid.FromString(endpointId) + if err != nil { + return errInvalidEndpointID + } + var resultBuffer *uint16 + hr := hcnDeleteEndpoint(&endpointGuid, &resultBuffer) + if err := checkForErrors("hcnDeleteEndpoint", hr, resultBuffer); err != nil { + return err + } + return nil +} + +// ListEndpoints makes a call to list all available endpoints. +func ListEndpoints() ([]HostComputeEndpoint, error) { + hcnQuery := defaultQuery() + endpoints, err := ListEndpointsQuery(hcnQuery) + if err != nil { + return nil, err + } + return endpoints, nil +} + +// ListEndpointsQuery makes a call to query the list of available endpoints. +func ListEndpointsQuery(query HostComputeQuery) ([]HostComputeEndpoint, error) { + queryJson, err := json.Marshal(query) + if err != nil { + return nil, err + } + + endpoints, err := enumerateEndpoints(string(queryJson)) + if err != nil { + return nil, err + } + return endpoints, nil +} + +// ListEndpointsOfNetwork queries the list of endpoints on a network. 
+func ListEndpointsOfNetwork(networkId string) ([]HostComputeEndpoint, error) { + hcnQuery := defaultQuery() + // TODO: Once query can convert schema, change to {HostComputeNetwork:networkId} + mapA := map[string]string{"VirtualNetwork": networkId} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + return ListEndpointsQuery(hcnQuery) +} + +// GetEndpointByID returns an endpoint specified by Id +func GetEndpointByID(endpointId string) (*HostComputeEndpoint, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": endpointId} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + endpoints, err := ListEndpointsQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(endpoints) == 0 { + return nil, EndpointNotFoundError{EndpointID: endpointId} + } + return &endpoints[0], err +} + +// GetEndpointByName returns an endpoint specified by Name +func GetEndpointByName(endpointName string) (*HostComputeEndpoint, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"Name": endpointName} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + endpoints, err := ListEndpointsQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(endpoints) == 0 { + return nil, EndpointNotFoundError{EndpointName: endpointName} + } + return &endpoints[0], err +} + +// Create Endpoint. 
+func (endpoint *HostComputeEndpoint) Create() (*HostComputeEndpoint, error) { + logrus.Debugf("hcn::HostComputeEndpoint::Create id=%s", endpoint.Id) + + if endpoint.HostComputeNamespace != "" { + return nil, errors.New("endpoint create error, endpoint json HostComputeNamespace is read only and should not be set") + } + + jsonString, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeEndpoint::Create JSON: %s", jsonString) + endpoint, hcnErr := createEndpoint(endpoint.HostComputeNetwork, string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return endpoint, nil +} + +// Delete Endpoint. +func (endpoint *HostComputeEndpoint) Delete() error { + logrus.Debugf("hcn::HostComputeEndpoint::Delete id=%s", endpoint.Id) + + if err := deleteEndpoint(endpoint.Id); err != nil { + return err + } + return nil +} + +// ModifyEndpointSettings updates the Port/Policy of an Endpoint. +func ModifyEndpointSettings(endpointId string, request *ModifyEndpointSettingRequest) error { + logrus.Debugf("hcn::HostComputeEndpoint::ModifyEndpointSettings id=%s", endpointId) + + endpointSettingsRequest, err := json.Marshal(request) + if err != nil { + return err + } + + _, err = modifyEndpoint(endpointId, string(endpointSettingsRequest)) + if err != nil { + return err + } + return nil +} + +// ApplyPolicy applies a Policy (ex: ACL) on the Endpoint. +func (endpoint *HostComputeEndpoint) ApplyPolicy(requestType RequestType, endpointPolicy PolicyEndpointRequest) error { + logrus.Debugf("hcn::HostComputeEndpoint::ApplyPolicy id=%s", endpoint.Id) + + settingsJson, err := json.Marshal(endpointPolicy) + if err != nil { + return err + } + requestMessage := &ModifyEndpointSettingRequest{ + ResourceType: EndpointResourceTypePolicy, + RequestType: requestType, + Settings: settingsJson, + } + + return ModifyEndpointSettings(endpoint.Id, requestMessage) +} + +// NamespaceAttach modifies a Namespace to add an endpoint. 
+func (endpoint *HostComputeEndpoint) NamespaceAttach(namespaceId string) error { + return AddNamespaceEndpoint(namespaceId, endpoint.Id) +} + +// NamespaceDetach modifies a Namespace to remove an endpoint. +func (endpoint *HostComputeEndpoint) NamespaceDetach(namespaceId string) error { + return RemoveNamespaceEndpoint(namespaceId, endpoint.Id) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go new file mode 100644 index 0000000000000..ad30d320d97eb --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnerrors.go @@ -0,0 +1,164 @@ +// Package hcn is a shim for the Host Compute Networking (HCN) service, which manages networking for Windows Server +// containers and Hyper-V containers. Previous to RS5, HCN was referred to as Host Networking Service (HNS). +package hcn + +import ( + "errors" + "fmt" + + "github.com/Microsoft/hcsshim/internal/hcs" + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +var ( + errInvalidNetworkID = errors.New("invalid network ID") + errInvalidEndpointID = errors.New("invalid endpoint ID") + errInvalidNamespaceID = errors.New("invalid namespace ID") + errInvalidLoadBalancerID = errors.New("invalid load balancer ID") + errInvalidRouteID = errors.New("invalid route ID") +) + +func checkForErrors(methodName string, hr error, resultBuffer *uint16) error { + errorFound := false + + if hr != nil { + errorFound = true + } + + result := "" + if resultBuffer != nil { + result = interop.ConvertAndFreeCoTaskMemString(resultBuffer) + if result != "" { + errorFound = true + } + } + + if errorFound { + returnError := new(hr, methodName, result) + logrus.Debugf(returnError.Error()) // HCN errors logged for debugging. 
+ return returnError + } + + return nil +} + +type ErrorCode uint32 + +// For common errors, define the error as it is in windows, so we can quickly determine it later +const ( + ERROR_NOT_FOUND = 0x490 + HCN_E_PORT_ALREADY_EXISTS ErrorCode = 0x803b0013 +) + +type HcnError struct { + *hcserror.HcsError + code ErrorCode +} + +func (e *HcnError) Error() string { + return e.HcsError.Error() +} + +func CheckErrorWithCode(err error, code ErrorCode) bool { + hcnError, ok := err.(*HcnError) + if ok { + return hcnError.code == code + } + return false +} + +func IsElementNotFoundError(err error) bool { + return CheckErrorWithCode(err, ERROR_NOT_FOUND) +} + +func IsPortAlreadyExistsError(err error) bool { + return CheckErrorWithCode(err, HCN_E_PORT_ALREADY_EXISTS) +} + +func new(hr error, title string, rest string) error { + err := &HcnError{} + hcsError := hcserror.New(hr, title, rest) + err.HcsError = hcsError.(*hcserror.HcsError) + err.code = ErrorCode(hcserror.Win32FromError(hr)) + return err +} + +// +// Note that the below errors are not errors returned by hcn itself +// we wish to seperate them as they are shim usage error +// + +// NetworkNotFoundError results from a failed seach for a network by Id or Name +type NetworkNotFoundError struct { + NetworkName string + NetworkID string +} + +func (e NetworkNotFoundError) Error() string { + if e.NetworkName != "" { + return fmt.Sprintf("Network name %q not found", e.NetworkName) + } + return fmt.Sprintf("Network ID %q not found", e.NetworkID) +} + +// EndpointNotFoundError results from a failed seach for an endpoint by Id or Name +type EndpointNotFoundError struct { + EndpointName string + EndpointID string +} + +func (e EndpointNotFoundError) Error() string { + if e.EndpointName != "" { + return fmt.Sprintf("Endpoint name %q not found", e.EndpointName) + } + return fmt.Sprintf("Endpoint ID %q not found", e.EndpointID) +} + +// NamespaceNotFoundError results from a failed seach for a namsepace by Id +type 
NamespaceNotFoundError struct { + NamespaceID string +} + +func (e NamespaceNotFoundError) Error() string { + return fmt.Sprintf("Namespace ID %q not found", e.NamespaceID) +} + +// LoadBalancerNotFoundError results from a failed seach for a loadbalancer by Id +type LoadBalancerNotFoundError struct { + LoadBalancerId string +} + +func (e LoadBalancerNotFoundError) Error() string { + return fmt.Sprintf("LoadBalancer %q not found", e.LoadBalancerId) +} + +// RouteNotFoundError results from a failed seach for a route by Id +type RouteNotFoundError struct { + RouteId string +} + +func (e RouteNotFoundError) Error() string { + return fmt.Sprintf("SDN Route %q not found", e.RouteId) +} + +// IsNotFoundError returns a boolean indicating whether the error was caused by +// a resource not being found. +func IsNotFoundError(err error) bool { + switch pe := err.(type) { + case NetworkNotFoundError: + return true + case EndpointNotFoundError: + return true + case NamespaceNotFoundError: + return true + case LoadBalancerNotFoundError: + return true + case RouteNotFoundError: + return true + case *hcserror.HcsError: + return pe.Err == hcs.ErrElementNotFound + } + return false +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go new file mode 100644 index 0000000000000..14903bc5e9132 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnglobals.go @@ -0,0 +1,138 @@ +package hcn + +import ( + "encoding/json" + "fmt" + "math" + + "github.com/Microsoft/hcsshim/internal/hcserror" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// Globals are all global properties of the HCN Service. +type Globals struct { + Version Version `json:"Version"` +} + +// Version is the HCN Service version. 
+type Version struct { + Major int `json:"Major"` + Minor int `json:"Minor"` +} + +type VersionRange struct { + MinVersion Version + MaxVersion Version +} + +type VersionRanges []VersionRange + +var ( + // HNSVersion1803 added ACL functionality. + HNSVersion1803 = VersionRanges{VersionRange{MinVersion: Version{Major: 7, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // V2ApiSupport allows the use of V2 Api calls and V2 Schema. + V2ApiSupport = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // Remote Subnet allows for Remote Subnet policies on Overlay networks + RemoteSubnetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // A Host Route policy allows for local container to local host communication Overlay networks + HostRouteVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 9, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // HNS 9.3 through 10.0 (not included), and 10.2+ allows for Direct Server Return for loadbalancing + DSRVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 10, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 9.3 through 10.0 (not included) and, 10.4+ provide support for configuring endpoints with /32 prefixes + Slash32EndpointPrefixesVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 9, Minor: 3}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 10, Minor: 4}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 9.3 through 10.0 (not included) and, 10.4+ allow for HNS ACL Policies to support protocol 252 for VXLAN + 
AclSupportForProtocol252Version = VersionRanges{ + VersionRange{MinVersion: Version{Major: 11, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 12.0 allows for session affinity for loadbalancing + SessionAffinityVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 12, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // HNS 11.10+ supports Ipv6 dual stack. + IPv6DualStackVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 11, Minor: 10}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + // HNS 13.0 allows for Set Policy support + SetPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + // HNS 10.3 allows for VXLAN ports + VxlanPortVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 10, Minor: 3}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + + //HNS 9.5 through 10.0(not included), 10.5 through 11.0(not included), 11.11 through 12.0(not included), 12.1 through 13.0(not included), 13.1+ allows for Network L4Proxy Policy support + L4ProxyPolicyVersion = VersionRanges{ + VersionRange{MinVersion: Version{Major: 9, Minor: 5}, MaxVersion: Version{Major: 9, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 10, Minor: 5}, MaxVersion: Version{Major: 10, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 11, Minor: 11}, MaxVersion: Version{Major: 11, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 12, Minor: 1}, MaxVersion: Version{Major: 12, Minor: math.MaxInt32}}, + VersionRange{MinVersion: Version{Major: 13, Minor: 1}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}, + } + + //HNS 13.2 allows for L4WfpProxy Policy support + L4WfpProxyPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 13, Minor: 2}, MaxVersion: Version{Major: math.MaxInt32, 
Minor: math.MaxInt32}}} + + //HNS 14.0 allows for TierAcl Policy support + TierAclPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 14, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + + //HNS 15.0 allows for NetworkACL Policy support + NetworkACLPolicyVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 15, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} + + //HNS 15.0 allows for NestedIpSet support + NestedIpSetVersion = VersionRanges{VersionRange{MinVersion: Version{Major: 15, Minor: 0}, MaxVersion: Version{Major: math.MaxInt32, Minor: math.MaxInt32}}} +) + +// GetGlobals returns the global properties of the HCN Service. +func GetGlobals() (*Globals, error) { + var version Version + err := hnsCall("GET", "/globals/version", "", &version) + if err != nil { + return nil, err + } + + globals := &Globals{ + Version: version, + } + + return globals, nil +} + +type hnsResponse struct { + Success bool + Error string + Output json.RawMessage +} + +func hnsCall(method, path, request string, returnResponse interface{}) error { + var responseBuffer *uint16 + logrus.Debugf("[%s]=>[%s] Request : %s", method, path, request) + + err := _hnsCall(method, path, request, &responseBuffer) + if err != nil { + return hcserror.New(err, "hnsCall", "") + } + response := interop.ConvertAndFreeCoTaskMemString(responseBuffer) + + hnsresponse := &hnsResponse{} + if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { + return err + } + + if !hnsresponse.Success { + return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error) + } + + if len(hnsresponse.Output) == 0 { + return nil + } + + logrus.Debugf("Network Response : %s", hnsresponse.Output) + err = json.Unmarshal(hnsresponse.Output, returnResponse) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go 
b/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go new file mode 100644 index 0000000000000..1b434b07b3adf --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnloadbalancer.go @@ -0,0 +1,311 @@ +package hcn + +import ( + "encoding/json" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// LoadBalancerPortMapping is associated with HostComputeLoadBalancer +type LoadBalancerPortMapping struct { + Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` + DistributionType LoadBalancerDistribution `json:",omitempty"` // EX: Distribute per connection = 0, distribute traffic of the same protocol per client IP = 1, distribute per client IP = 2 + Flags LoadBalancerPortMappingFlags `json:",omitempty"` +} + +// HostComputeLoadBalancer represents software load balancer. +type HostComputeLoadBalancer struct { + Id string `json:"ID,omitempty"` + HostComputeEndpoints []string `json:",omitempty"` + SourceVIP string `json:",omitempty"` + FrontendVIPs []string `json:",omitempty"` + PortMappings []LoadBalancerPortMapping `json:",omitempty"` + SchemaVersion SchemaVersion `json:",omitempty"` + Flags LoadBalancerFlags `json:",omitempty"` // 0: None, 1: EnableDirectServerReturn +} + +//LoadBalancerFlags modify settings for a loadbalancer. +type LoadBalancerFlags uint32 + +var ( + // LoadBalancerFlagsNone is the default. + LoadBalancerFlagsNone LoadBalancerFlags = 0 + // LoadBalancerFlagsDSR enables Direct Server Return (DSR) + LoadBalancerFlagsDSR LoadBalancerFlags = 1 + LoadBalancerFlagsIPv6 LoadBalancerFlags = 2 +) + +// LoadBalancerPortMappingFlags are special settings on a loadbalancer. +type LoadBalancerPortMappingFlags uint32 + +var ( + // LoadBalancerPortMappingFlagsNone is the default. 
+ LoadBalancerPortMappingFlagsNone LoadBalancerPortMappingFlags + // LoadBalancerPortMappingFlagsILB enables internal loadbalancing. + LoadBalancerPortMappingFlagsILB LoadBalancerPortMappingFlags = 1 + // LoadBalancerPortMappingFlagsLocalRoutedVIP enables VIP access from the host. + LoadBalancerPortMappingFlagsLocalRoutedVIP LoadBalancerPortMappingFlags = 2 + // LoadBalancerPortMappingFlagsUseMux enables DSR for NodePort access of VIP. + LoadBalancerPortMappingFlagsUseMux LoadBalancerPortMappingFlags = 4 + // LoadBalancerPortMappingFlagsPreserveDIP delivers packets with destination IP as the VIP. + LoadBalancerPortMappingFlagsPreserveDIP LoadBalancerPortMappingFlags = 8 +) + +// LoadBalancerDistribution specifies how the loadbalancer distributes traffic. +type LoadBalancerDistribution uint32 + +var ( + // LoadBalancerDistributionNone is the default and loadbalances each connection to the same pod. + LoadBalancerDistributionNone LoadBalancerDistribution + // LoadBalancerDistributionSourceIPProtocol loadbalances all traffic of the same protocol from a client IP to the same pod. + LoadBalancerDistributionSourceIPProtocol LoadBalancerDistribution = 1 + // LoadBalancerDistributionSourceIP loadbalances all traffic from a client IP to the same pod. + LoadBalancerDistributionSourceIP LoadBalancerDistribution = 2 +) + +func getLoadBalancer(loadBalancerGuid guid.GUID, query string) (*HostComputeLoadBalancer, error) { + // Open loadBalancer. + var ( + loadBalancerHandle hcnLoadBalancer + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenLoadBalancer(&loadBalancerGuid, &loadBalancerHandle, &resultBuffer) + if err := checkForErrors("hcnOpenLoadBalancer", hr, resultBuffer); err != nil { + return nil, err + } + // Query loadBalancer. 
+ hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close loadBalancer. + hr = hcnCloseLoadBalancer(loadBalancerHandle) + if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeLoadBalancer + var outputLoadBalancer HostComputeLoadBalancer + if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { + return nil, err + } + return &outputLoadBalancer, nil +} + +func enumerateLoadBalancers(query string) ([]HostComputeLoadBalancer, error) { + // Enumerate all LoadBalancer Guids + var ( + resultBuffer *uint16 + loadBalancerBuffer *uint16 + ) + hr := hcnEnumerateLoadBalancers(query, &loadBalancerBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateLoadBalancers", hr, resultBuffer); err != nil { + return nil, err + } + + loadBalancers := interop.ConvertAndFreeCoTaskMemString(loadBalancerBuffer) + var loadBalancerIds []guid.GUID + if err := json.Unmarshal([]byte(loadBalancers), &loadBalancerIds); err != nil { + return nil, err + } + + var outputLoadBalancers []HostComputeLoadBalancer + for _, loadBalancerGuid := range loadBalancerIds { + loadBalancer, err := getLoadBalancer(loadBalancerGuid, query) + if err != nil { + return nil, err + } + outputLoadBalancers = append(outputLoadBalancers, *loadBalancer) + } + return outputLoadBalancers, nil +} + +func createLoadBalancer(settings string) (*HostComputeLoadBalancer, error) { + // Create new loadBalancer. 
+ var ( + loadBalancerHandle hcnLoadBalancer + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + loadBalancerGuid := guid.GUID{} + hr := hcnCreateLoadBalancer(&loadBalancerGuid, settings, &loadBalancerHandle, &resultBuffer) + if err := checkForErrors("hcnCreateLoadBalancer", hr, resultBuffer); err != nil { + return nil, err + } + // Query loadBalancer. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryLoadBalancerProperties(loadBalancerHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryLoadBalancerProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close loadBalancer. + hr = hcnCloseLoadBalancer(loadBalancerHandle) + if err := checkForErrors("hcnCloseLoadBalancer", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeLoadBalancer + var outputLoadBalancer HostComputeLoadBalancer + if err := json.Unmarshal([]byte(properties), &outputLoadBalancer); err != nil { + return nil, err + } + return &outputLoadBalancer, nil +} + +func deleteLoadBalancer(loadBalancerId string) error { + loadBalancerGuid, err := guid.FromString(loadBalancerId) + if err != nil { + return errInvalidLoadBalancerID + } + var resultBuffer *uint16 + hr := hcnDeleteLoadBalancer(&loadBalancerGuid, &resultBuffer) + if err := checkForErrors("hcnDeleteLoadBalancer", hr, resultBuffer); err != nil { + return err + } + return nil +} + +// ListLoadBalancers makes a call to list all available loadBalancers. +func ListLoadBalancers() ([]HostComputeLoadBalancer, error) { + hcnQuery := defaultQuery() + loadBalancers, err := ListLoadBalancersQuery(hcnQuery) + if err != nil { + return nil, err + } + return loadBalancers, nil +} + +// ListLoadBalancersQuery makes a call to query the list of available loadBalancers. 
+func ListLoadBalancersQuery(query HostComputeQuery) ([]HostComputeLoadBalancer, error) { + queryJson, err := json.Marshal(query) + if err != nil { + return nil, err + } + + loadBalancers, err := enumerateLoadBalancers(string(queryJson)) + if err != nil { + return nil, err + } + return loadBalancers, nil +} + +// GetLoadBalancerByID returns the LoadBalancer specified by Id. +func GetLoadBalancerByID(loadBalancerId string) (*HostComputeLoadBalancer, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": loadBalancerId} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + loadBalancers, err := ListLoadBalancersQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(loadBalancers) == 0 { + return nil, LoadBalancerNotFoundError{LoadBalancerId: loadBalancerId} + } + return &loadBalancers[0], err +} + +// Create LoadBalancer. +func (loadBalancer *HostComputeLoadBalancer) Create() (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::Create id=%s", loadBalancer.Id) + + jsonString, err := json.Marshal(loadBalancer) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeLoadBalancer::Create JSON: %s", jsonString) + loadBalancer, hcnErr := createLoadBalancer(string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return loadBalancer, nil +} + +// Delete LoadBalancer. 
+func (loadBalancer *HostComputeLoadBalancer) Delete() error { + logrus.Debugf("hcn::HostComputeLoadBalancer::Delete id=%s", loadBalancer.Id) + + if err := deleteLoadBalancer(loadBalancer.Id); err != nil { + return err + } + return nil +} + +// AddEndpoint add an endpoint to a LoadBalancer +func (loadBalancer *HostComputeLoadBalancer) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::AddEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) + + err := loadBalancer.Delete() + if err != nil { + return nil, err + } + + // Add Endpoint to the Existing List + loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) + + return loadBalancer.Create() +} + +// RemoveEndpoint removes an endpoint from a LoadBalancer +func (loadBalancer *HostComputeLoadBalancer) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::RemoveEndpoint loadBalancer=%s endpoint=%s", loadBalancer.Id, endpoint.Id) + + err := loadBalancer.Delete() + if err != nil { + return nil, err + } + + // Create a list of all the endpoints besides the one being removed + var endpoints []string + for _, endpointReference := range loadBalancer.HostComputeEndpoints { + if endpointReference == endpoint.Id { + continue + } + endpoints = append(endpoints, endpointReference) + } + loadBalancer.HostComputeEndpoints = endpoints + return loadBalancer.Create() +} + +// AddLoadBalancer for the specified endpoints +func AddLoadBalancer(endpoints []HostComputeEndpoint, flags LoadBalancerFlags, portMappingFlags LoadBalancerPortMappingFlags, sourceVIP string, frontendVIPs []string, protocol uint16, internalPort uint16, externalPort uint16) (*HostComputeLoadBalancer, error) { + logrus.Debugf("hcn::HostComputeLoadBalancer::AddLoadBalancer endpointId=%v, LoadBalancerFlags=%v, LoadBalancerPortMappingFlags=%v, sourceVIP=%s, frontendVIPs=%v, 
protocol=%v, internalPort=%v, externalPort=%v", endpoints, flags, portMappingFlags, sourceVIP, frontendVIPs, protocol, internalPort, externalPort) + + loadBalancer := &HostComputeLoadBalancer{ + SourceVIP: sourceVIP, + PortMappings: []LoadBalancerPortMapping{ + { + Protocol: uint32(protocol), + InternalPort: internalPort, + ExternalPort: externalPort, + Flags: portMappingFlags, + }, + }, + FrontendVIPs: frontendVIPs, + SchemaVersion: SchemaVersion{ + Major: 2, + Minor: 0, + }, + Flags: flags, + } + + for _, endpoint := range endpoints { + loadBalancer.HostComputeEndpoints = append(loadBalancer.HostComputeEndpoints, endpoint.Id) + } + + return loadBalancer.Create() +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go new file mode 100644 index 0000000000000..d2ef2296099ab --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnamespace.go @@ -0,0 +1,446 @@ +package hcn + +import ( + "encoding/json" + "os" + "syscall" + + "github.com/Microsoft/go-winio/pkg/guid" + icni "github.com/Microsoft/hcsshim/internal/cni" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/Microsoft/hcsshim/internal/regstate" + "github.com/Microsoft/hcsshim/internal/runhcs" + "github.com/sirupsen/logrus" +) + +// NamespaceResourceEndpoint represents an Endpoint attached to a Namespace. +type NamespaceResourceEndpoint struct { + Id string `json:"ID,"` +} + +// NamespaceResourceContainer represents a Container attached to a Namespace. +type NamespaceResourceContainer struct { + Id string `json:"ID,"` +} + +// NamespaceResourceType determines whether the Namespace resource is a Container or Endpoint. +type NamespaceResourceType string + +var ( + // NamespaceResourceTypeContainer are contianers associated with a Namespace. + NamespaceResourceTypeContainer NamespaceResourceType = "Container" + // NamespaceResourceTypeEndpoint are endpoints associated with a Namespace. 
+ NamespaceResourceTypeEndpoint NamespaceResourceType = "Endpoint" +) + +// NamespaceResource is associated with a namespace +type NamespaceResource struct { + Type NamespaceResourceType `json:","` // Container, Endpoint + Data json.RawMessage `json:","` +} + +// NamespaceType determines whether the Namespace is for a Host or Guest +type NamespaceType string + +var ( + // NamespaceTypeHost are host namespaces. + NamespaceTypeHost NamespaceType = "Host" + // NamespaceTypeHostDefault are host namespaces in the default compartment. + NamespaceTypeHostDefault NamespaceType = "HostDefault" + // NamespaceTypeGuest are guest namespaces. + NamespaceTypeGuest NamespaceType = "Guest" + // NamespaceTypeGuestDefault are guest namespaces in the default compartment. + NamespaceTypeGuestDefault NamespaceType = "GuestDefault" +) + +// HostComputeNamespace represents a namespace (AKA compartment) in +type HostComputeNamespace struct { + Id string `json:"ID,omitempty"` + NamespaceId uint32 `json:",omitempty"` + Type NamespaceType `json:",omitempty"` // Host, HostDefault, Guest, GuestDefault + Resources []NamespaceResource `json:",omitempty"` + SchemaVersion SchemaVersion `json:",omitempty"` +} + +// ModifyNamespaceSettingRequest is the structure used to send request to modify a namespace. +// Used to Add/Remove an endpoints and containers to/from a namespace. +type ModifyNamespaceSettingRequest struct { + ResourceType NamespaceResourceType `json:",omitempty"` // Container, Endpoint + RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh + Settings json.RawMessage `json:",omitempty"` +} + +func getNamespace(namespaceGuid guid.GUID, query string) (*HostComputeNamespace, error) { + // Open namespace. 
+ var ( + namespaceHandle hcnNamespace + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { + return nil, err + } + // Query namespace. + hr = hcnQueryNamespaceProperties(namespaceHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close namespace. + hr = hcnCloseNamespace(namespaceHandle) + if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNamespace + var outputNamespace HostComputeNamespace + if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { + return nil, err + } + return &outputNamespace, nil +} + +func enumerateNamespaces(query string) ([]HostComputeNamespace, error) { + // Enumerate all Namespace Guids + var ( + resultBuffer *uint16 + namespaceBuffer *uint16 + ) + hr := hcnEnumerateNamespaces(query, &namespaceBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateNamespaces", hr, resultBuffer); err != nil { + return nil, err + } + + namespaces := interop.ConvertAndFreeCoTaskMemString(namespaceBuffer) + var namespaceIds []guid.GUID + if err := json.Unmarshal([]byte(namespaces), &namespaceIds); err != nil { + return nil, err + } + + var outputNamespaces []HostComputeNamespace + for _, namespaceGuid := range namespaceIds { + namespace, err := getNamespace(namespaceGuid, query) + if err != nil { + return nil, err + } + outputNamespaces = append(outputNamespaces, *namespace) + } + return outputNamespaces, nil +} + +func createNamespace(settings string) (*HostComputeNamespace, error) { + // Create new namespace. 
+ var ( + namespaceHandle hcnNamespace + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + namespaceGuid := guid.GUID{} + hr := hcnCreateNamespace(&namespaceGuid, settings, &namespaceHandle, &resultBuffer) + if err := checkForErrors("hcnCreateNamespace", hr, resultBuffer); err != nil { + return nil, err + } + // Query namespace. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close namespace. + hr = hcnCloseNamespace(namespaceHandle) + if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNamespace + var outputNamespace HostComputeNamespace + if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { + return nil, err + } + return &outputNamespace, nil +} + +func modifyNamespace(namespaceId string, settings string) (*HostComputeNamespace, error) { + namespaceGuid, err := guid.FromString(namespaceId) + if err != nil { + return nil, errInvalidNamespaceID + } + // Open namespace. + var ( + namespaceHandle hcnNamespace + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenNamespace(&namespaceGuid, &namespaceHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNamespace", hr, resultBuffer); err != nil { + return nil, err + } + // Modify namespace. + hr = hcnModifyNamespace(namespaceHandle, settings, &resultBuffer) + if err := checkForErrors("hcnModifyNamespace", hr, resultBuffer); err != nil { + return nil, err + } + // Query namespace. 
+ hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNamespaceProperties(namespaceHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNamespaceProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close namespace. + hr = hcnCloseNamespace(namespaceHandle) + if err := checkForErrors("hcnCloseNamespace", hr, nil); err != nil { + return nil, err + } + // Convert output to Namespace + var outputNamespace HostComputeNamespace + if err := json.Unmarshal([]byte(properties), &outputNamespace); err != nil { + return nil, err + } + return &outputNamespace, nil +} + +func deleteNamespace(namespaceId string) error { + namespaceGuid, err := guid.FromString(namespaceId) + if err != nil { + return errInvalidNamespaceID + } + var resultBuffer *uint16 + hr := hcnDeleteNamespace(&namespaceGuid, &resultBuffer) + if err := checkForErrors("hcnDeleteNamespace", hr, resultBuffer); err != nil { + return err + } + return nil +} + +// ListNamespaces makes a call to list all available namespaces. +func ListNamespaces() ([]HostComputeNamespace, error) { + hcnQuery := defaultQuery() + namespaces, err := ListNamespacesQuery(hcnQuery) + if err != nil { + return nil, err + } + return namespaces, nil +} + +// ListNamespacesQuery makes a call to query the list of available namespaces. +func ListNamespacesQuery(query HostComputeQuery) ([]HostComputeNamespace, error) { + queryJson, err := json.Marshal(query) + if err != nil { + return nil, err + } + + namespaces, err := enumerateNamespaces(string(queryJson)) + if err != nil { + return nil, err + } + return namespaces, nil +} + +// GetNamespaceByID returns the Namespace specified by Id. 
+func GetNamespaceByID(namespaceId string) (*HostComputeNamespace, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": namespaceId} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + namespaces, err := ListNamespacesQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(namespaces) == 0 { + return nil, NamespaceNotFoundError{NamespaceID: namespaceId} + } + + return &namespaces[0], err +} + +// GetNamespaceEndpointIds returns the endpoints of the Namespace specified by Id. +func GetNamespaceEndpointIds(namespaceId string) ([]string, error) { + namespace, err := GetNamespaceByID(namespaceId) + if err != nil { + return nil, err + } + var endpointsIds []string + for _, resource := range namespace.Resources { + if resource.Type == "Endpoint" { + var endpointResource NamespaceResourceEndpoint + if err := json.Unmarshal([]byte(resource.Data), &endpointResource); err != nil { + return nil, err + } + endpointsIds = append(endpointsIds, endpointResource.Id) + } + } + return endpointsIds, nil +} + +// GetNamespaceContainerIds returns the containers of the Namespace specified by Id. +func GetNamespaceContainerIds(namespaceId string) ([]string, error) { + namespace, err := GetNamespaceByID(namespaceId) + if err != nil { + return nil, err + } + var containerIds []string + for _, resource := range namespace.Resources { + if resource.Type == "Container" { + var contaienrResource NamespaceResourceContainer + if err := json.Unmarshal([]byte(resource.Data), &contaienrResource); err != nil { + return nil, err + } + containerIds = append(containerIds, contaienrResource.Id) + } + } + return containerIds, nil +} + +// NewNamespace creates a new Namespace object +func NewNamespace(nsType NamespaceType) *HostComputeNamespace { + return &HostComputeNamespace{ + Type: nsType, + SchemaVersion: V2SchemaVersion(), + } +} + +// Create Namespace. 
+func (namespace *HostComputeNamespace) Create() (*HostComputeNamespace, error) { + logrus.Debugf("hcn::HostComputeNamespace::Create id=%s", namespace.Id) + + jsonString, err := json.Marshal(namespace) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeNamespace::Create JSON: %s", jsonString) + namespace, hcnErr := createNamespace(string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return namespace, nil +} + +// Delete Namespace. +func (namespace *HostComputeNamespace) Delete() error { + logrus.Debugf("hcn::HostComputeNamespace::Delete id=%s", namespace.Id) + + if err := deleteNamespace(namespace.Id); err != nil { + return err + } + return nil +} + +// Sync Namespace endpoints with the appropriate sandbox container holding the +// network namespace open. If no sandbox container is found for this namespace +// this method is determined to be a success and will not return an error in +// this case. If the sandbox container is found and a sync is initiated any +// failures will be returned via this method. +// +// This call initiates a sync between endpoints and the matching UtilityVM +// hosting those endpoints. It is safe to call for any `NamespaceType` but +// `NamespaceTypeGuest` is the only case when a sync will actually occur. For +// `NamespaceTypeHost` the process container will be automatically synchronized +// when the the endpoint is added via `AddNamespaceEndpoint`. +// +// Note: This method sync's both additions and removals of endpoints from a +// `NamespaceTypeGuest` namespace. +func (namespace *HostComputeNamespace) Sync() error { + logrus.WithField("id", namespace.Id).Debugf("hcs::HostComputeNamespace::Sync") + + // We only attempt a sync for namespace guest. 
+ if namespace.Type != NamespaceTypeGuest { + return nil + } + + // Look in the registry for the key to map from namespace id to pod-id + cfg, err := icni.LoadPersistedNamespaceConfig(namespace.Id) + if err != nil { + if regstate.IsNotFoundError(err) { + return nil + } + return err + } + req := runhcs.VMRequest{ + ID: cfg.ContainerID, + Op: runhcs.OpSyncNamespace, + } + shimPath := runhcs.VMPipePath(cfg.HostUniqueID) + if err := runhcs.IssueVMRequest(shimPath, &req); err != nil { + // The shim is likey gone. Simply ignore the sync as if it didn't exist. + if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { + // Remove the reg key there is no point to try again + _ = cfg.Remove() + return nil + } + f := map[string]interface{}{ + "id": namespace.Id, + "container-id": cfg.ContainerID, + } + logrus.WithFields(f). + WithError(err). + Debugf("hcs::HostComputeNamespace::Sync failed to connect to shim pipe: '%s'", shimPath) + return err + } + return nil +} + +// ModifyNamespaceSettings updates the Endpoints/Containers of a Namespace. +func ModifyNamespaceSettings(namespaceId string, request *ModifyNamespaceSettingRequest) error { + logrus.Debugf("hcn::HostComputeNamespace::ModifyNamespaceSettings id=%s", namespaceId) + + namespaceSettings, err := json.Marshal(request) + if err != nil { + return err + } + + _, err = modifyNamespace(namespaceId, string(namespaceSettings)) + if err != nil { + return err + } + return nil +} + +// AddNamespaceEndpoint adds an endpoint to a Namespace. 
+func AddNamespaceEndpoint(namespaceId string, endpointId string) error { + logrus.Debugf("hcn::HostComputeEndpoint::AddNamespaceEndpoint id=%s", endpointId) + + mapA := map[string]string{"EndpointId": endpointId} + settingsJson, err := json.Marshal(mapA) + if err != nil { + return err + } + requestMessage := &ModifyNamespaceSettingRequest{ + ResourceType: NamespaceResourceTypeEndpoint, + RequestType: RequestTypeAdd, + Settings: settingsJson, + } + + return ModifyNamespaceSettings(namespaceId, requestMessage) +} + +// RemoveNamespaceEndpoint removes an endpoint from a Namespace. +func RemoveNamespaceEndpoint(namespaceId string, endpointId string) error { + logrus.Debugf("hcn::HostComputeNamespace::RemoveNamespaceEndpoint id=%s", endpointId) + + mapA := map[string]string{"EndpointId": endpointId} + settingsJson, err := json.Marshal(mapA) + if err != nil { + return err + } + requestMessage := &ModifyNamespaceSettingRequest{ + ResourceType: NamespaceResourceTypeEndpoint, + RequestType: RequestTypeRemove, + Settings: settingsJson, + } + + return ModifyNamespaceSettings(namespaceId, requestMessage) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go new file mode 100644 index 0000000000000..c36b136387a99 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnnetwork.go @@ -0,0 +1,462 @@ +package hcn + +import ( + "encoding/json" + "errors" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// Route is associated with a subnet. +type Route struct { + NextHop string `json:",omitempty"` + DestinationPrefix string `json:",omitempty"` + Metric uint16 `json:",omitempty"` +} + +// Subnet is associated with a Ipam. 
+type Subnet struct { + IpAddressPrefix string `json:",omitempty"` + Policies []json.RawMessage `json:",omitempty"` + Routes []Route `json:",omitempty"` +} + +// Ipam (Internet Protocol Address Management) is associated with a network +// and represents the address space(s) of a network. +type Ipam struct { + Type string `json:",omitempty"` // Ex: Static, DHCP + Subnets []Subnet `json:",omitempty"` +} + +// MacRange is associated with MacPool and respresents the start and end addresses. +type MacRange struct { + StartMacAddress string `json:",omitempty"` + EndMacAddress string `json:",omitempty"` +} + +// MacPool is associated with a network and represents pool of MacRanges. +type MacPool struct { + Ranges []MacRange `json:",omitempty"` +} + +// Dns (Domain Name System is associated with a network). +type Dns struct { + Domain string `json:",omitempty"` + Search []string `json:",omitempty"` + ServerList []string `json:",omitempty"` + Options []string `json:",omitempty"` +} + +// NetworkType are various networks. +type NetworkType string + +// NetworkType const +const ( + NAT NetworkType = "NAT" + Transparent NetworkType = "Transparent" + L2Bridge NetworkType = "L2Bridge" + L2Tunnel NetworkType = "L2Tunnel" + ICS NetworkType = "ICS" + Private NetworkType = "Private" + Overlay NetworkType = "Overlay" +) + +// NetworkFlags are various network flags. 
+type NetworkFlags uint32 + +// NetworkFlags const +const ( + None NetworkFlags = 0 + EnableNonPersistent NetworkFlags = 8 +) + +// HostComputeNetwork represents a network +type HostComputeNetwork struct { + Id string `json:"ID,omitempty"` + Name string `json:",omitempty"` + Type NetworkType `json:",omitempty"` + Policies []NetworkPolicy `json:",omitempty"` + MacPool MacPool `json:",omitempty"` + Dns Dns `json:",omitempty"` + Ipams []Ipam `json:",omitempty"` + Flags NetworkFlags `json:",omitempty"` // 0: None + Health Health `json:",omitempty"` + SchemaVersion SchemaVersion `json:",omitempty"` +} + +// NetworkResourceType are the 3 different Network settings resources. +type NetworkResourceType string + +var ( + // NetworkResourceTypePolicy is for Network's policies. Ex: RemoteSubnet + NetworkResourceTypePolicy NetworkResourceType = "Policy" + // NetworkResourceTypeDNS is for Network's DNS settings. + NetworkResourceTypeDNS NetworkResourceType = "DNS" + // NetworkResourceTypeExtension is for Network's extension settings. + NetworkResourceTypeExtension NetworkResourceType = "Extension" +) + +// ModifyNetworkSettingRequest is the structure used to send request to modify an network. +// Used to update DNS/extension/policy on an network. +type ModifyNetworkSettingRequest struct { + ResourceType NetworkResourceType `json:",omitempty"` // Policy, DNS, Extension + RequestType RequestType `json:",omitempty"` // Add, Remove, Update, Refresh + Settings json.RawMessage `json:",omitempty"` +} + +type PolicyNetworkRequest struct { + Policies []NetworkPolicy `json:",omitempty"` +} + +func getNetwork(networkGuid guid.GUID, query string) (*HostComputeNetwork, error) { + // Open network. + var ( + networkHandle hcnNetwork + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Query network. 
+ hr = hcnQueryNetworkProperties(networkHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close network. + hr = hcnCloseNetwork(networkHandle) + if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNetwork + var outputNetwork HostComputeNetwork + + // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), + // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before + // unmarshaling the JSON blob. + outputNetwork.Type = NAT + + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { + return nil, err + } + return &outputNetwork, nil +} + +func enumerateNetworks(query string) ([]HostComputeNetwork, error) { + // Enumerate all Network Guids + var ( + resultBuffer *uint16 + networkBuffer *uint16 + ) + hr := hcnEnumerateNetworks(query, &networkBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateNetworks", hr, resultBuffer); err != nil { + return nil, err + } + + networks := interop.ConvertAndFreeCoTaskMemString(networkBuffer) + var networkIds []guid.GUID + if err := json.Unmarshal([]byte(networks), &networkIds); err != nil { + return nil, err + } + + var outputNetworks []HostComputeNetwork + for _, networkGuid := range networkIds { + network, err := getNetwork(networkGuid, query) + if err != nil { + return nil, err + } + outputNetworks = append(outputNetworks, *network) + } + return outputNetworks, nil +} + +func createNetwork(settings string) (*HostComputeNetwork, error) { + // Create new network. 
+ var ( + networkHandle hcnNetwork + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + networkGuid := guid.GUID{} + hr := hcnCreateNetwork(&networkGuid, settings, &networkHandle, &resultBuffer) + if err := checkForErrors("hcnCreateNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Query network. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close network. + hr = hcnCloseNetwork(networkHandle) + if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNetwork + var outputNetwork HostComputeNetwork + + // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), + // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before + // unmarshaling the JSON blob. + outputNetwork.Type = NAT + + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { + return nil, err + } + return &outputNetwork, nil +} + +func modifyNetwork(networkId string, settings string) (*HostComputeNetwork, error) { + networkGuid, err := guid.FromString(networkId) + if err != nil { + return nil, errInvalidNetworkID + } + // Open Network + var ( + networkHandle hcnNetwork + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenNetwork(&networkGuid, &networkHandle, &resultBuffer) + if err := checkForErrors("hcnOpenNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Modify Network + hr = hcnModifyNetwork(networkHandle, settings, &resultBuffer) + if err := checkForErrors("hcnModifyNetwork", hr, resultBuffer); err != nil { + return nil, err + } + // Query network. 
+ hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryNetworkProperties(networkHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryNetworkProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close network. + hr = hcnCloseNetwork(networkHandle) + if err := checkForErrors("hcnCloseNetwork", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeNetwork + var outputNetwork HostComputeNetwork + + // If HNS sets the network type to NAT (i.e. '0' in HNS.Schema.Network.NetworkMode), + // the value will be omitted from the JSON blob. We therefore need to initialize NAT here before + // unmarshaling the JSON blob. + outputNetwork.Type = NAT + + if err := json.Unmarshal([]byte(properties), &outputNetwork); err != nil { + return nil, err + } + return &outputNetwork, nil +} + +func deleteNetwork(networkId string) error { + networkGuid, err := guid.FromString(networkId) + if err != nil { + return errInvalidNetworkID + } + var resultBuffer *uint16 + hr := hcnDeleteNetwork(&networkGuid, &resultBuffer) + if err := checkForErrors("hcnDeleteNetwork", hr, resultBuffer); err != nil { + return err + } + return nil +} + +// ListNetworks makes a call to list all available networks. +func ListNetworks() ([]HostComputeNetwork, error) { + hcnQuery := defaultQuery() + networks, err := ListNetworksQuery(hcnQuery) + if err != nil { + return nil, err + } + return networks, nil +} + +// ListNetworksQuery makes a call to query the list of available networks. 
+func ListNetworksQuery(query HostComputeQuery) ([]HostComputeNetwork, error) { + queryJson, err := json.Marshal(query) + if err != nil { + return nil, err + } + + networks, err := enumerateNetworks(string(queryJson)) + if err != nil { + return nil, err + } + return networks, nil +} + +// GetNetworkByID returns the network specified by Id. +func GetNetworkByID(networkID string) (*HostComputeNetwork, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": networkID} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + networks, err := ListNetworksQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(networks) == 0 { + return nil, NetworkNotFoundError{NetworkID: networkID} + } + return &networks[0], err +} + +// GetNetworkByName returns the network specified by Name. +func GetNetworkByName(networkName string) (*HostComputeNetwork, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"Name": networkName} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + networks, err := ListNetworksQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(networks) == 0 { + return nil, NetworkNotFoundError{NetworkName: networkName} + } + return &networks[0], err +} + +// Create Network. 
+func (network *HostComputeNetwork) Create() (*HostComputeNetwork, error) { + logrus.Debugf("hcn::HostComputeNetwork::Create id=%s", network.Id) + for _, ipam := range network.Ipams { + for _, subnet := range ipam.Subnets { + if subnet.IpAddressPrefix != "" { + hasDefault := false + for _, route := range subnet.Routes { + if route.NextHop == "" { + return nil, errors.New("network create error, subnet has address prefix but no gateway specified") + } + if route.DestinationPrefix == "0.0.0.0/0" || route.DestinationPrefix == "::/0" { + hasDefault = true + } + } + if !hasDefault { + return nil, errors.New("network create error, no default gateway") + } + } + } + } + + jsonString, err := json.Marshal(network) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeNetwork::Create JSON: %s", jsonString) + network, hcnErr := createNetwork(string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return network, nil +} + +// Delete Network. +func (network *HostComputeNetwork) Delete() error { + logrus.Debugf("hcn::HostComputeNetwork::Delete id=%s", network.Id) + + if err := deleteNetwork(network.Id); err != nil { + return err + } + return nil +} + +// ModifyNetworkSettings updates the Policy for a network. +func (network *HostComputeNetwork) ModifyNetworkSettings(request *ModifyNetworkSettingRequest) error { + logrus.Debugf("hcn::HostComputeNetwork::ModifyNetworkSettings id=%s", network.Id) + + networkSettingsRequest, err := json.Marshal(request) + if err != nil { + return err + } + + _, err = modifyNetwork(network.Id, string(networkSettingsRequest)) + if err != nil { + return err + } + return nil +} + +// AddPolicy applies a Policy (ex: RemoteSubnet) on the Network. 
+func (network *HostComputeNetwork) AddPolicy(networkPolicy PolicyNetworkRequest) error { + logrus.Debugf("hcn::HostComputeNetwork::AddPolicy id=%s", network.Id) + + settingsJson, err := json.Marshal(networkPolicy) + if err != nil { + return err + } + requestMessage := &ModifyNetworkSettingRequest{ + ResourceType: NetworkResourceTypePolicy, + RequestType: RequestTypeAdd, + Settings: settingsJson, + } + + return network.ModifyNetworkSettings(requestMessage) +} + +// RemovePolicy removes a Policy (ex: RemoteSubnet) from the Network. +func (network *HostComputeNetwork) RemovePolicy(networkPolicy PolicyNetworkRequest) error { + logrus.Debugf("hcn::HostComputeNetwork::RemovePolicy id=%s", network.Id) + + settingsJson, err := json.Marshal(networkPolicy) + if err != nil { + return err + } + requestMessage := &ModifyNetworkSettingRequest{ + ResourceType: NetworkResourceTypePolicy, + RequestType: RequestTypeRemove, + Settings: settingsJson, + } + + return network.ModifyNetworkSettings(requestMessage) +} + +// CreateEndpoint creates an endpoint on the Network. +func (network *HostComputeNetwork) CreateEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) { + isRemote := endpoint.Flags&EndpointFlagsRemoteEndpoint != 0 + logrus.Debugf("hcn::HostComputeNetwork::CreatEndpoint, networkId=%s remote=%t", network.Id, isRemote) + + endpoint.HostComputeNetwork = network.Id + endpointSettings, err := json.Marshal(endpoint) + if err != nil { + return nil, err + } + newEndpoint, err := createEndpoint(network.Id, string(endpointSettings)) + if err != nil { + return nil, err + } + return newEndpoint, nil +} + +// CreateRemoteEndpoint creates a remote endpoint on the Network. 
+func (network *HostComputeNetwork) CreateRemoteEndpoint(endpoint *HostComputeEndpoint) (*HostComputeEndpoint, error) { + endpoint.Flags = EndpointFlagsRemoteEndpoint | endpoint.Flags + return network.CreateEndpoint(endpoint) +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go new file mode 100644 index 0000000000000..c2aa599f3394b --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnpolicy.go @@ -0,0 +1,344 @@ +package hcn + +import ( + "encoding/json" +) + +// EndpointPolicyType are the potential Policies that apply to Endpoints. +type EndpointPolicyType string + +// EndpointPolicyType const +const ( + PortMapping EndpointPolicyType = "PortMapping" + ACL EndpointPolicyType = "ACL" + QOS EndpointPolicyType = "QOS" + L2Driver EndpointPolicyType = "L2Driver" + OutBoundNAT EndpointPolicyType = "OutBoundNAT" + SDNRoute EndpointPolicyType = "SDNRoute" + L4Proxy EndpointPolicyType = "L4Proxy" + L4WFPPROXY EndpointPolicyType = "L4WFPPROXY" + PortName EndpointPolicyType = "PortName" + EncapOverhead EndpointPolicyType = "EncapOverhead" + IOV EndpointPolicyType = "Iov" + // Endpoint and Network have InterfaceConstraint and ProviderAddress + NetworkProviderAddress EndpointPolicyType = "ProviderAddress" + NetworkInterfaceConstraint EndpointPolicyType = "InterfaceConstraint" + TierAcl EndpointPolicyType = "TierAcl" +) + +// EndpointPolicy is a collection of Policy settings for an Endpoint. +type EndpointPolicy struct { + Type EndpointPolicyType `json:""` + Settings json.RawMessage `json:",omitempty"` +} + +// NetworkPolicyType are the potential Policies that apply to Networks. 
+type NetworkPolicyType string + +// NetworkPolicyType const +const ( + SourceMacAddress NetworkPolicyType = "SourceMacAddress" + NetAdapterName NetworkPolicyType = "NetAdapterName" + VSwitchExtension NetworkPolicyType = "VSwitchExtension" + DrMacAddress NetworkPolicyType = "DrMacAddress" + AutomaticDNS NetworkPolicyType = "AutomaticDNS" + InterfaceConstraint NetworkPolicyType = "InterfaceConstraint" + ProviderAddress NetworkPolicyType = "ProviderAddress" + RemoteSubnetRoute NetworkPolicyType = "RemoteSubnetRoute" + VxlanPort NetworkPolicyType = "VxlanPort" + HostRoute NetworkPolicyType = "HostRoute" + SetPolicy NetworkPolicyType = "SetPolicy" + NetworkL4Proxy NetworkPolicyType = "L4Proxy" + LayerConstraint NetworkPolicyType = "LayerConstraint" + NetworkACL NetworkPolicyType = "NetworkACL" +) + +// NetworkPolicy is a collection of Policy settings for a Network. +type NetworkPolicy struct { + Type NetworkPolicyType `json:""` + Settings json.RawMessage `json:",omitempty"` +} + +// SubnetPolicyType are the potential Policies that apply to Subnets. +type SubnetPolicyType string + +// SubnetPolicyType const +const ( + VLAN SubnetPolicyType = "VLAN" + VSID SubnetPolicyType = "VSID" +) + +// SubnetPolicy is a collection of Policy settings for a Subnet. +type SubnetPolicy struct { + Type SubnetPolicyType `json:""` + Settings json.RawMessage `json:",omitempty"` +} + +// NatFlags are flags for portmappings. +type NatFlags uint32 + +const ( + NatFlagsNone NatFlags = iota + NatFlagsLocalRoutedVip + NatFlagsIPv6 +) + +/// Endpoint Policy objects + +// PortMappingPolicySetting defines Port Mapping (NAT) +type PortMappingPolicySetting struct { + Protocol uint32 `json:",omitempty"` // EX: TCP = 6, UDP = 17 + InternalPort uint16 `json:",omitempty"` + ExternalPort uint16 `json:",omitempty"` + VIP string `json:",omitempty"` + Flags NatFlags `json:",omitempty"` +} + +// ActionType associated with ACLs. Value is either Allow or Block. 
+type ActionType string + +// DirectionType associated with ACLs. Value is either In or Out. +type DirectionType string + +// RuleType associated with ACLs. Value is either Host (WFP) or Switch (VFP). +type RuleType string + +const ( + // Allow traffic + ActionTypeAllow ActionType = "Allow" + // Block traffic + ActionTypeBlock ActionType = "Block" + // Pass traffic + ActionTypePass ActionType = "Pass" + + // In is traffic coming to the Endpoint + DirectionTypeIn DirectionType = "In" + // Out is traffic leaving the Endpoint + DirectionTypeOut DirectionType = "Out" + + // Host creates WFP (Windows Firewall) rules + RuleTypeHost RuleType = "Host" + // Switch creates VFP (Virtual Filter Platform) rules + RuleTypeSwitch RuleType = "Switch" +) + +// AclPolicySetting creates firewall rules on an endpoint +type AclPolicySetting struct { + Protocols string `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP) + Action ActionType `json:","` + Direction DirectionType `json:","` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:",omitempty"` + RemotePorts string `json:",omitempty"` + RuleType RuleType `json:",omitempty"` + Priority uint16 `json:",omitempty"` +} + +// QosPolicySetting sets Quality of Service bandwidth caps on an Endpoint. +type QosPolicySetting struct { + MaximumOutgoingBandwidthInBytes uint64 +} + +// OutboundNatPolicySetting sets outbound Network Address Translation on an Endpoint. +type OutboundNatPolicySetting struct { + VirtualIP string `json:",omitempty"` + Exceptions []string `json:",omitempty"` + Destinations []string `json:",omitempty"` + Flags NatFlags `json:",omitempty"` +} + +// SDNRoutePolicySetting sets SDN Route on an Endpoint. 
+type SDNRoutePolicySetting struct { + DestinationPrefix string `json:",omitempty"` + NextHop string `json:",omitempty"` + NeedEncap bool `json:",omitempty"` +} + +// NetworkACLPolicySetting creates ACL rules on a network +type NetworkACLPolicySetting struct { + Protocols string `json:",omitempty"` // EX: 6 (TCP), 17 (UDP), 1 (ICMPv4), 58 (ICMPv6), 2 (IGMP) + Action ActionType `json:","` + Direction DirectionType `json:","` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:",omitempty"` + RemotePorts string `json:",omitempty"` + RuleType RuleType `json:",omitempty"` + Priority uint16 `json:",omitempty"` +} + +// FiveTuple is nested in L4ProxyPolicySetting for WFP support. +type FiveTuple struct { + Protocols string `json:",omitempty"` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:",omitempty"` + RemotePorts string `json:",omitempty"` + Priority uint16 `json:",omitempty"` +} + +// ProxyExceptions exempts traffic to IpAddresses and Ports +type ProxyExceptions struct { + IpAddressExceptions []string `json:",omitempty"` + PortExceptions []string `json:",omitempty"` +} + +// L4WfpProxyPolicySetting sets Layer-4 Proxy on an endpoint. +type L4WfpProxyPolicySetting struct { + InboundProxyPort string `json:",omitempty"` + OutboundProxyPort string `json:",omitempty"` + FilterTuple FiveTuple `json:",omitempty"` + UserSID string `json:",omitempty"` + InboundExceptions ProxyExceptions `json:",omitempty"` + OutboundExceptions ProxyExceptions `json:",omitempty"` +} + +// PortnameEndpointPolicySetting sets the port name for an endpoint. +type PortnameEndpointPolicySetting struct { + Name string `json:",omitempty"` +} + +// EncapOverheadEndpointPolicySetting sets the encap overhead for an endpoint. +type EncapOverheadEndpointPolicySetting struct { + Overhead uint16 `json:",omitempty"` +} + +// IovPolicySetting sets the Iov settings for an endpoint. 
+type IovPolicySetting struct { + IovOffloadWeight uint32 `json:",omitempty"` + QueuePairsRequested uint32 `json:",omitempty"` + InterruptModeration uint32 `json:",omitempty"` +} + +/// Endpoint and Network Policy objects + +// ProviderAddressEndpointPolicySetting sets the PA for an endpoint. +type ProviderAddressEndpointPolicySetting struct { + ProviderAddress string `json:",omitempty"` +} + +// InterfaceConstraintPolicySetting limits an Endpoint or Network to a specific Nic. +type InterfaceConstraintPolicySetting struct { + InterfaceGuid string `json:",omitempty"` + InterfaceLuid uint64 `json:",omitempty"` + InterfaceIndex uint32 `json:",omitempty"` + InterfaceMediaType uint32 `json:",omitempty"` + InterfaceAlias string `json:",omitempty"` + InterfaceDescription string `json:",omitempty"` +} + +/// Network Policy objects + +// SourceMacAddressNetworkPolicySetting sets source MAC for a network. +type SourceMacAddressNetworkPolicySetting struct { + SourceMacAddress string `json:",omitempty"` +} + +// NetAdapterNameNetworkPolicySetting sets network adapter of a network. +type NetAdapterNameNetworkPolicySetting struct { + NetworkAdapterName string `json:",omitempty"` +} + +// VSwitchExtensionNetworkPolicySetting enables/disabled VSwitch extensions for a network. +type VSwitchExtensionNetworkPolicySetting struct { + ExtensionID string `json:",omitempty"` + Enable bool `json:",omitempty"` +} + +// DrMacAddressNetworkPolicySetting sets the DR MAC for a network. +type DrMacAddressNetworkPolicySetting struct { + Address string `json:",omitempty"` +} + +// AutomaticDNSNetworkPolicySetting enables/disables automatic DNS on a network. +type AutomaticDNSNetworkPolicySetting struct { + Enable bool `json:",omitempty"` +} + +type LayerConstraintNetworkPolicySetting struct { + LayerId string `json:",omitempty"` +} + +/// Subnet Policy objects + +// VlanPolicySetting isolates a subnet with VLAN tagging. 
+type VlanPolicySetting struct { + IsolationId uint32 `json:","` +} + +// VsidPolicySetting isolates a subnet with VSID tagging. +type VsidPolicySetting struct { + IsolationId uint32 `json:","` +} + +// RemoteSubnetRoutePolicySetting creates remote subnet route rules on a network +type RemoteSubnetRoutePolicySetting struct { + DestinationPrefix string + IsolationId uint16 + ProviderAddress string + DistributedRouterMacAddress string +} + +// SetPolicyTypes associated with SetPolicy. Value is IPSET. +type SetPolicyType string + +const ( + SetPolicyTypeIpSet SetPolicyType = "IPSET" + SetPolicyTypeNestedIpSet SetPolicyType = "NESTEDIPSET" +) + +// SetPolicySetting creates IPSets on network +type SetPolicySetting struct { + Id string + Name string + Type SetPolicyType + Values string +} + +// VxlanPortPolicySetting allows configuring the VXLAN TCP port +type VxlanPortPolicySetting struct { + Port uint16 +} + +// ProtocolType associated with L4ProxyPolicy +type ProtocolType uint32 + +const ( + ProtocolTypeUnknown ProtocolType = 0 + ProtocolTypeICMPv4 ProtocolType = 1 + ProtocolTypeIGMP ProtocolType = 2 + ProtocolTypeTCP ProtocolType = 6 + ProtocolTypeUDP ProtocolType = 17 + ProtocolTypeICMPv6 ProtocolType = 58 +) + +//L4ProxyPolicySetting applies proxy policy on network/endpoint +type L4ProxyPolicySetting struct { + IP string `json:",omitempty"` + Port string `json:",omitempty"` + Protocol ProtocolType `json:",omitempty"` + Exceptions []string `json:",omitempty"` + Destination string + OutboundNAT bool `json:",omitempty"` +} + +// TierAclRule represents an ACL within TierAclPolicySetting +type TierAclRule struct { + Id string `json:",omitempty"` + Protocols string `json:",omitempty"` + TierAclRuleAction ActionType `json:","` + LocalAddresses string `json:",omitempty"` + RemoteAddresses string `json:",omitempty"` + LocalPorts string `json:",omitempty"` + RemotePorts string `json:",omitempty"` + Priority uint16 `json:",omitempty"` +} + +// TierAclPolicySetting represents 
a Tier containing ACLs +type TierAclPolicySetting struct { + Name string `json:","` + Direction DirectionType `json:","` + Order uint16 `json:""` + TierAclRules []TierAclRule `json:",omitempty"` +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go new file mode 100644 index 0000000000000..52e2498462446 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnroute.go @@ -0,0 +1,266 @@ +package hcn + +import ( + "encoding/json" + "errors" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/interop" + "github.com/sirupsen/logrus" +) + +// HostComputeRoute represents SDN routes. +type HostComputeRoute struct { + ID string `json:"ID,omitempty"` + HostComputeEndpoints []string `json:",omitempty"` + Setting []SDNRoutePolicySetting `json:",omitempty"` + SchemaVersion SchemaVersion `json:",omitempty"` +} + +// ListRoutes makes a call to list all available routes. +func ListRoutes() ([]HostComputeRoute, error) { + hcnQuery := defaultQuery() + routes, err := ListRoutesQuery(hcnQuery) + if err != nil { + return nil, err + } + return routes, nil +} + +// ListRoutesQuery makes a call to query the list of available routes. +func ListRoutesQuery(query HostComputeQuery) ([]HostComputeRoute, error) { + queryJSON, err := json.Marshal(query) + if err != nil { + return nil, err + } + + routes, err := enumerateRoutes(string(queryJSON)) + if err != nil { + return nil, err + } + return routes, nil +} + +// GetRouteByID returns the route specified by Id. 
+func GetRouteByID(routeID string) (*HostComputeRoute, error) { + hcnQuery := defaultQuery() + mapA := map[string]string{"ID": routeID} + filter, err := json.Marshal(mapA) + if err != nil { + return nil, err + } + hcnQuery.Filter = string(filter) + + routes, err := ListRoutesQuery(hcnQuery) + if err != nil { + return nil, err + } + if len(routes) == 0 { + return nil, RouteNotFoundError{RouteId: routeID} + } + return &routes[0], err +} + +// Create Route. +func (route *HostComputeRoute) Create() (*HostComputeRoute, error) { + logrus.Debugf("hcn::HostComputeRoute::Create id=%s", route.ID) + + jsonString, err := json.Marshal(route) + if err != nil { + return nil, err + } + + logrus.Debugf("hcn::HostComputeRoute::Create JSON: %s", jsonString) + route, hcnErr := createRoute(string(jsonString)) + if hcnErr != nil { + return nil, hcnErr + } + return route, nil +} + +// Delete Route. +func (route *HostComputeRoute) Delete() error { + logrus.Debugf("hcn::HostComputeRoute::Delete id=%s", route.ID) + + existingRoute, _ := GetRouteByID(route.ID) + + if existingRoute != nil { + if err := deleteRoute(route.ID); err != nil { + return err + } + } + + return nil +} + +// AddEndpoint add an endpoint to a route +// Since HCNRoute doesn't implement modify functionality, add operation is essentially delete and add +func (route *HostComputeRoute) AddEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) { + logrus.Debugf("hcn::HostComputeRoute::AddEndpoint route=%s endpoint=%s", route.ID, endpoint.Id) + + err := route.Delete() + if err != nil { + return nil, err + } + + // Add Endpoint to the Existing List + route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id) + + return route.Create() +} + +// RemoveEndpoint removes an endpoint from a route +// Since HCNRoute doesn't implement modify functionality, remove operation is essentially delete and add +func (route *HostComputeRoute) RemoveEndpoint(endpoint *HostComputeEndpoint) (*HostComputeRoute, error) { 
+ logrus.Debugf("hcn::HostComputeRoute::RemoveEndpoint route=%s endpoint=%s", route.ID, endpoint.Id) + + err := route.Delete() + if err != nil { + return nil, err + } + + // Create a list of all the endpoints besides the one being removed + i := 0 + for index, endpointReference := range route.HostComputeEndpoints { + if endpointReference == endpoint.Id { + i = index + break + } + } + + route.HostComputeEndpoints = append(route.HostComputeEndpoints[0:i], route.HostComputeEndpoints[i+1:]...) + return route.Create() +} + +// AddRoute for the specified endpoints and SDN Route setting +func AddRoute(endpoints []HostComputeEndpoint, destinationPrefix string, nextHop string, needEncapsulation bool) (*HostComputeRoute, error) { + logrus.Debugf("hcn::HostComputeRoute::AddRoute endpointId=%v, destinationPrefix=%v, nextHop=%v, needEncapsulation=%v", endpoints, destinationPrefix, nextHop, needEncapsulation) + + if len(endpoints) <= 0 { + return nil, errors.New("missing endpoints") + } + + route := &HostComputeRoute{ + SchemaVersion: V2SchemaVersion(), + Setting: []SDNRoutePolicySetting{ + { + DestinationPrefix: destinationPrefix, + NextHop: nextHop, + NeedEncap: needEncapsulation, + }, + }, + } + + for _, endpoint := range endpoints { + route.HostComputeEndpoints = append(route.HostComputeEndpoints, endpoint.Id) + } + + return route.Create() +} + +func enumerateRoutes(query string) ([]HostComputeRoute, error) { + // Enumerate all routes Guids + var ( + resultBuffer *uint16 + routeBuffer *uint16 + ) + hr := hcnEnumerateRoutes(query, &routeBuffer, &resultBuffer) + if err := checkForErrors("hcnEnumerateRoutes", hr, resultBuffer); err != nil { + return nil, err + } + + routes := interop.ConvertAndFreeCoTaskMemString(routeBuffer) + var routeIds []guid.GUID + if err := json.Unmarshal([]byte(routes), &routeIds); err != nil { + return nil, err + } + + var outputRoutes []HostComputeRoute + for _, routeGUID := range routeIds { + route, err := getRoute(routeGUID, query) + if err != nil { 
+ return nil, err + } + outputRoutes = append(outputRoutes, *route) + } + return outputRoutes, nil +} + +func getRoute(routeGUID guid.GUID, query string) (*HostComputeRoute, error) { + // Open routes. + var ( + routeHandle hcnRoute + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + hr := hcnOpenRoute(&routeGUID, &routeHandle, &resultBuffer) + if err := checkForErrors("hcnOpenRoute", hr, resultBuffer); err != nil { + return nil, err + } + // Query routes. + hr = hcnQueryRouteProperties(routeHandle, query, &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close routes. + hr = hcnCloseRoute(routeHandle) + if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeRoute + var outputRoute HostComputeRoute + if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil { + return nil, err + } + return &outputRoute, nil +} + +func createRoute(settings string) (*HostComputeRoute, error) { + // Create new route. + var ( + routeHandle hcnRoute + resultBuffer *uint16 + propertiesBuffer *uint16 + ) + routeGUID := guid.GUID{} + hr := hcnCreateRoute(&routeGUID, settings, &routeHandle, &resultBuffer) + if err := checkForErrors("hcnCreateRoute", hr, resultBuffer); err != nil { + return nil, err + } + // Query route. + hcnQuery := defaultQuery() + query, err := json.Marshal(hcnQuery) + if err != nil { + return nil, err + } + hr = hcnQueryRouteProperties(routeHandle, string(query), &propertiesBuffer, &resultBuffer) + if err := checkForErrors("hcnQueryRouteProperties", hr, resultBuffer); err != nil { + return nil, err + } + properties := interop.ConvertAndFreeCoTaskMemString(propertiesBuffer) + // Close Route. 
+ hr = hcnCloseRoute(routeHandle) + if err := checkForErrors("hcnCloseRoute", hr, nil); err != nil { + return nil, err + } + // Convert output to HostComputeRoute + var outputRoute HostComputeRoute + if err := json.Unmarshal([]byte(properties), &outputRoute); err != nil { + return nil, err + } + return &outputRoute, nil +} + +func deleteRoute(routeID string) error { + routeGUID, err := guid.FromString(routeID) + if err != nil { + return errInvalidRouteID + } + var resultBuffer *uint16 + hr := hcnDeleteRoute(&routeGUID, &resultBuffer) + if err := checkForErrors("hcnDeleteRoute", hr, resultBuffer); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go b/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go new file mode 100644 index 0000000000000..bacb91fedaee1 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/hcnsupport.go @@ -0,0 +1,147 @@ +package hcn + +import ( + "fmt" + "sync" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +var ( + // featuresOnce handles assigning the supported features and printing the supported info to stdout only once to avoid unnecessary work + // multiple times. + featuresOnce sync.Once + featuresErr error + supportedFeatures SupportedFeatures +) + +// SupportedFeatures are the features provided by the Service. 
+type SupportedFeatures struct { + Acl AclFeatures `json:"ACL"` + Api ApiSupport `json:"API"` + RemoteSubnet bool `json:"RemoteSubnet"` + HostRoute bool `json:"HostRoute"` + DSR bool `json:"DSR"` + Slash32EndpointPrefixes bool `json:"Slash32EndpointPrefixes"` + AclSupportForProtocol252 bool `json:"AclSupportForProtocol252"` + SessionAffinity bool `json:"SessionAffinity"` + IPv6DualStack bool `json:"IPv6DualStack"` + SetPolicy bool `json:"SetPolicy"` + VxlanPort bool `json:"VxlanPort"` + L4Proxy bool `json:"L4Proxy"` // network policy that applies VFP rules to all endpoints on the network to redirect traffic + L4WfpProxy bool `json:"L4WfpProxy"` // endpoint policy that applies WFP filters to redirect traffic to/from that endpoint + TierAcl bool `json:"TierAcl"` + NetworkACL bool `json:"NetworkACL"` + NestedIpSet bool `json:"NestedIpSet"` +} + +// AclFeatures are the supported ACL possibilities. +type AclFeatures struct { + AclAddressLists bool `json:"AclAddressLists"` + AclNoHostRulePriority bool `json:"AclHostRulePriority"` + AclPortRanges bool `json:"AclPortRanges"` + AclRuleId bool `json:"AclRuleId"` +} + +// ApiSupport lists the supported API versions. +type ApiSupport struct { + V1 bool `json:"V1"` + V2 bool `json:"V2"` +} + +// GetCachedSupportedFeatures returns the features supported by the Service and an error if the query failed. If this has been called +// before it will return the supported features and error received from the first call. This can be used to optimize if many calls to the +// various hcn.IsXSupported methods need to be made. +func GetCachedSupportedFeatures() (SupportedFeatures, error) { + // Only query the HCN version and features supported once, instead of everytime this is invoked. The logs are useful to + // debug incidents where there's confusion on if a feature is supported on the host machine. The sync.Once helps to avoid redundant + // spam of these anytime a check needs to be made for if an HCN feature is supported. 
This is a common occurrence in kube-proxy + // for example. + featuresOnce.Do(func() { + supportedFeatures, featuresErr = getSupportedFeatures() + }) + + return supportedFeatures, featuresErr +} + +// GetSupportedFeatures returns the features supported by the Service. +// +// Deprecated: Use GetCachedSupportedFeatures instead. +func GetSupportedFeatures() SupportedFeatures { + features, err := GetCachedSupportedFeatures() + if err != nil { + // Expected on pre-1803 builds, all features will be false/unsupported + logrus.WithError(err).Errorf("unable to obtain supported features") + return features + } + return features +} + +func getSupportedFeatures() (SupportedFeatures, error) { + var features SupportedFeatures + globals, err := GetGlobals() + if err != nil { + // It's expected if this fails once, it should always fail. It should fail on pre 1803 builds for example. + return SupportedFeatures{}, errors.Wrap(err, "failed to query HCN version number: this is expected on pre 1803 builds.") + } + features.Acl = AclFeatures{ + AclAddressLists: isFeatureSupported(globals.Version, HNSVersion1803), + AclNoHostRulePriority: isFeatureSupported(globals.Version, HNSVersion1803), + AclPortRanges: isFeatureSupported(globals.Version, HNSVersion1803), + AclRuleId: isFeatureSupported(globals.Version, HNSVersion1803), + } + + features.Api = ApiSupport{ + V2: isFeatureSupported(globals.Version, V2ApiSupport), + V1: true, // HNSCall is still available. 
+ } + + features.RemoteSubnet = isFeatureSupported(globals.Version, RemoteSubnetVersion) + features.HostRoute = isFeatureSupported(globals.Version, HostRouteVersion) + features.DSR = isFeatureSupported(globals.Version, DSRVersion) + features.Slash32EndpointPrefixes = isFeatureSupported(globals.Version, Slash32EndpointPrefixesVersion) + features.AclSupportForProtocol252 = isFeatureSupported(globals.Version, AclSupportForProtocol252Version) + features.SessionAffinity = isFeatureSupported(globals.Version, SessionAffinityVersion) + features.IPv6DualStack = isFeatureSupported(globals.Version, IPv6DualStackVersion) + features.SetPolicy = isFeatureSupported(globals.Version, SetPolicyVersion) + features.VxlanPort = isFeatureSupported(globals.Version, VxlanPortVersion) + features.L4Proxy = isFeatureSupported(globals.Version, L4ProxyPolicyVersion) + features.L4WfpProxy = isFeatureSupported(globals.Version, L4WfpProxyPolicyVersion) + features.TierAcl = isFeatureSupported(globals.Version, TierAclPolicyVersion) + features.NetworkACL = isFeatureSupported(globals.Version, NetworkACLPolicyVersion) + features.NestedIpSet = isFeatureSupported(globals.Version, NestedIpSetVersion) + + logrus.WithFields(logrus.Fields{ + "version": fmt.Sprintf("%+v", globals.Version), + "supportedFeatures": fmt.Sprintf("%+v", features), + }).Info("HCN feature check") + + return features, nil +} + +func isFeatureSupported(currentVersion Version, versionsSupported VersionRanges) bool { + isFeatureSupported := false + + for _, versionRange := range versionsSupported { + isFeatureSupported = isFeatureSupported || isFeatureInRange(currentVersion, versionRange) + } + + return isFeatureSupported +} + +func isFeatureInRange(currentVersion Version, versionRange VersionRange) bool { + if currentVersion.Major < versionRange.MinVersion.Major { + return false + } + if currentVersion.Major > versionRange.MaxVersion.Major { + return false + } + if currentVersion.Major == versionRange.MinVersion.Major && 
currentVersion.Minor < versionRange.MinVersion.Minor { + return false + } + if currentVersion.Major == versionRange.MaxVersion.Major && currentVersion.Minor > versionRange.MaxVersion.Minor { + return false + } + return true +} diff --git a/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go new file mode 100644 index 0000000000000..7ec5b58b66a8f --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/hcn/zsyscall_windows.go @@ -0,0 +1,795 @@ +// Code generated mksyscall_windows.exe DO NOT EDIT + +package hcn + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. +const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) 
+ return e +} + +var ( + modiphlpapi = windows.NewLazySystemDLL("iphlpapi.dll") + modvmcompute = windows.NewLazySystemDLL("vmcompute.dll") + modcomputenetwork = windows.NewLazySystemDLL("computenetwork.dll") + + procSetCurrentThreadCompartmentId = modiphlpapi.NewProc("SetCurrentThreadCompartmentId") + procHNSCall = modvmcompute.NewProc("HNSCall") + procHcnEnumerateNetworks = modcomputenetwork.NewProc("HcnEnumerateNetworks") + procHcnCreateNetwork = modcomputenetwork.NewProc("HcnCreateNetwork") + procHcnOpenNetwork = modcomputenetwork.NewProc("HcnOpenNetwork") + procHcnModifyNetwork = modcomputenetwork.NewProc("HcnModifyNetwork") + procHcnQueryNetworkProperties = modcomputenetwork.NewProc("HcnQueryNetworkProperties") + procHcnDeleteNetwork = modcomputenetwork.NewProc("HcnDeleteNetwork") + procHcnCloseNetwork = modcomputenetwork.NewProc("HcnCloseNetwork") + procHcnEnumerateEndpoints = modcomputenetwork.NewProc("HcnEnumerateEndpoints") + procHcnCreateEndpoint = modcomputenetwork.NewProc("HcnCreateEndpoint") + procHcnOpenEndpoint = modcomputenetwork.NewProc("HcnOpenEndpoint") + procHcnModifyEndpoint = modcomputenetwork.NewProc("HcnModifyEndpoint") + procHcnQueryEndpointProperties = modcomputenetwork.NewProc("HcnQueryEndpointProperties") + procHcnDeleteEndpoint = modcomputenetwork.NewProc("HcnDeleteEndpoint") + procHcnCloseEndpoint = modcomputenetwork.NewProc("HcnCloseEndpoint") + procHcnEnumerateNamespaces = modcomputenetwork.NewProc("HcnEnumerateNamespaces") + procHcnCreateNamespace = modcomputenetwork.NewProc("HcnCreateNamespace") + procHcnOpenNamespace = modcomputenetwork.NewProc("HcnOpenNamespace") + procHcnModifyNamespace = modcomputenetwork.NewProc("HcnModifyNamespace") + procHcnQueryNamespaceProperties = modcomputenetwork.NewProc("HcnQueryNamespaceProperties") + procHcnDeleteNamespace = modcomputenetwork.NewProc("HcnDeleteNamespace") + procHcnCloseNamespace = modcomputenetwork.NewProc("HcnCloseNamespace") + procHcnEnumerateLoadBalancers = 
modcomputenetwork.NewProc("HcnEnumerateLoadBalancers") + procHcnCreateLoadBalancer = modcomputenetwork.NewProc("HcnCreateLoadBalancer") + procHcnOpenLoadBalancer = modcomputenetwork.NewProc("HcnOpenLoadBalancer") + procHcnModifyLoadBalancer = modcomputenetwork.NewProc("HcnModifyLoadBalancer") + procHcnQueryLoadBalancerProperties = modcomputenetwork.NewProc("HcnQueryLoadBalancerProperties") + procHcnDeleteLoadBalancer = modcomputenetwork.NewProc("HcnDeleteLoadBalancer") + procHcnCloseLoadBalancer = modcomputenetwork.NewProc("HcnCloseLoadBalancer") + procHcnEnumerateSdnRoutes = modcomputenetwork.NewProc("HcnEnumerateSdnRoutes") + procHcnCreateSdnRoute = modcomputenetwork.NewProc("HcnCreateSdnRoute") + procHcnOpenSdnRoute = modcomputenetwork.NewProc("HcnOpenSdnRoute") + procHcnModifySdnRoute = modcomputenetwork.NewProc("HcnModifySdnRoute") + procHcnQuerySdnRouteProperties = modcomputenetwork.NewProc("HcnQuerySdnRouteProperties") + procHcnDeleteSdnRoute = modcomputenetwork.NewProc("HcnDeleteSdnRoute") + procHcnCloseSdnRoute = modcomputenetwork.NewProc("HcnCloseSdnRoute") +) + +func SetCurrentThreadCompartmentId(compartmentId uint32) (hr error) { + r0, _, _ := syscall.Syscall(procSetCurrentThreadCompartmentId.Addr(), 1, uintptr(compartmentId), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func _hnsCall(method string, path string, object string, response **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(method) + if hr != nil { + return + } + var _p1 *uint16 + _p1, hr = syscall.UTF16PtrFromString(path) + if hr != nil { + return + } + var _p2 *uint16 + _p2, hr = syscall.UTF16PtrFromString(object) + if hr != nil { + return + } + return __hnsCall(_p0, _p1, _p2, response) +} + +func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { + if hr = procHNSCall.Find(); hr != nil { + return + } + r0, _, _ := 
syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateNetworks(query string, networks **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateNetworks(_p0, networks, result) +} + +func _hcnEnumerateNetworks(query *uint16, networks **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateNetworks.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateNetworks.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(networks)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateNetwork(id *_guid, settings string, network *hcnNetwork, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateNetwork(id, _p0, network, result) +} + +func _hcnCreateNetwork(id *_guid, settings *uint16, network *hcnNetwork, result **uint16) (hr error) { + if hr = procHcnCreateNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateNetwork.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(network)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenNetwork(id *_guid, network *hcnNetwork, result **uint16) (hr error) { + if hr = procHcnOpenNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenNetwork.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(network)), 
uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyNetwork(network hcnNetwork, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyNetwork(network, _p0, result) +} + +func _hcnModifyNetwork(network hcnNetwork, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyNetwork.Addr(), 3, uintptr(network), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryNetworkProperties(network hcnNetwork, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryNetworkProperties(network, _p0, properties, result) +} + +func _hcnQueryNetworkProperties(network hcnNetwork, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryNetworkProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryNetworkProperties.Addr(), 4, uintptr(network), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteNetwork(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteNetwork.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func 
hcnCloseNetwork(network hcnNetwork) (hr error) { + if hr = procHcnCloseNetwork.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseNetwork.Addr(), 1, uintptr(network), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateEndpoints(query string, endpoints **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateEndpoints(_p0, endpoints, result) +} + +func _hcnEnumerateEndpoints(query *uint16, endpoints **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateEndpoints.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateEndpoints.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(endpoints)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateEndpoint(network hcnNetwork, id *_guid, settings string, endpoint *hcnEndpoint, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateEndpoint(network, id, _p0, endpoint, result) +} + +func _hcnCreateEndpoint(network hcnNetwork, id *_guid, settings *uint16, endpoint *hcnEndpoint, result **uint16) (hr error) { + if hr = procHcnCreateEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateEndpoint.Addr(), 5, uintptr(network), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenEndpoint(id *_guid, endpoint *hcnEndpoint, result **uint16) (hr error) { + if hr = procHcnOpenEndpoint.Find(); hr != nil { + return + } + r0, _, _ := 
syscall.Syscall(procHcnOpenEndpoint.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(endpoint)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyEndpoint(endpoint hcnEndpoint, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyEndpoint(endpoint, _p0, result) +} + +func _hcnModifyEndpoint(endpoint hcnEndpoint, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyEndpoint.Addr(), 3, uintptr(endpoint), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryEndpointProperties(endpoint hcnEndpoint, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryEndpointProperties(endpoint, _p0, properties, result) +} + +func _hcnQueryEndpointProperties(endpoint hcnEndpoint, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryEndpointProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryEndpointProperties.Addr(), 4, uintptr(endpoint), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteEndpoint(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteEndpoint.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) 
+ if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseEndpoint(endpoint hcnEndpoint) (hr error) { + if hr = procHcnCloseEndpoint.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseEndpoint.Addr(), 1, uintptr(endpoint), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateNamespaces(query string, namespaces **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateNamespaces(_p0, namespaces, result) +} + +func _hcnEnumerateNamespaces(query *uint16, namespaces **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateNamespaces.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateNamespaces.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(namespaces)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateNamespace(id *_guid, settings string, namespace *hcnNamespace, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateNamespace(id, _p0, namespace, result) +} + +func _hcnCreateNamespace(id *_guid, settings *uint16, namespace *hcnNamespace, result **uint16) (hr error) { + if hr = procHcnCreateNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateNamespace.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenNamespace(id *_guid, namespace *hcnNamespace, result **uint16) (hr 
error) { + if hr = procHcnOpenNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenNamespace.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(namespace)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyNamespace(namespace hcnNamespace, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyNamespace(namespace, _p0, result) +} + +func _hcnModifyNamespace(namespace hcnNamespace, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyNamespace.Addr(), 3, uintptr(namespace), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryNamespaceProperties(namespace hcnNamespace, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryNamespaceProperties(namespace, _p0, properties, result) +} + +func _hcnQueryNamespaceProperties(namespace hcnNamespace, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryNamespaceProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryNamespaceProperties.Addr(), 4, uintptr(namespace), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteNamespace(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteNamespace.Find(); hr != nil { + return + } + r0, _, _ := 
syscall.Syscall(procHcnDeleteNamespace.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseNamespace(namespace hcnNamespace) (hr error) { + if hr = procHcnCloseNamespace.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseNamespace.Addr(), 1, uintptr(namespace), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateLoadBalancers(query string, loadBalancers **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateLoadBalancers(_p0, loadBalancers, result) +} + +func _hcnEnumerateLoadBalancers(query *uint16, loadBalancers **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateLoadBalancers.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateLoadBalancers.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(loadBalancers)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateLoadBalancer(id *_guid, settings string, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateLoadBalancer(id, _p0, loadBalancer, result) +} + +func _hcnCreateLoadBalancer(id *_guid, settings *uint16, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + if hr = procHcnCreateLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateLoadBalancer.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { 
+ if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenLoadBalancer(id *_guid, loadBalancer *hcnLoadBalancer, result **uint16) (hr error) { + if hr = procHcnOpenLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenLoadBalancer.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(loadBalancer)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyLoadBalancer(loadBalancer, _p0, result) +} + +func _hcnModifyLoadBalancer(loadBalancer hcnLoadBalancer, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifyLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifyLoadBalancer.Addr(), 3, uintptr(loadBalancer), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryLoadBalancerProperties(loadBalancer, _p0, properties, result) +} + +func _hcnQueryLoadBalancerProperties(loadBalancer hcnLoadBalancer, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQueryLoadBalancerProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQueryLoadBalancerProperties.Addr(), 4, uintptr(loadBalancer), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if 
int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteLoadBalancer(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteLoadBalancer.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseLoadBalancer(loadBalancer hcnLoadBalancer) (hr error) { + if hr = procHcnCloseLoadBalancer.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseLoadBalancer.Addr(), 1, uintptr(loadBalancer), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnEnumerateRoutes(query string, routes **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnEnumerateRoutes(_p0, routes, result) +} + +func _hcnEnumerateRoutes(query *uint16, routes **uint16, result **uint16) (hr error) { + if hr = procHcnEnumerateSdnRoutes.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnEnumerateSdnRoutes.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(routes)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCreateRoute(id *_guid, settings string, route *hcnRoute, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnCreateRoute(id, _p0, route, result) +} + +func _hcnCreateRoute(id *_guid, settings *uint16, route *hcnRoute, result **uint16) (hr error) { + if hr = procHcnCreateSdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnCreateSdnRoute.Addr(), 
4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnOpenRoute(id *_guid, route *hcnRoute, result **uint16) (hr error) { + if hr = procHcnOpenSdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnOpenSdnRoute.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(route)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnModifyRoute(route hcnRoute, settings string, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(settings) + if hr != nil { + return + } + return _hcnModifyRoute(route, _p0, result) +} + +func _hcnModifyRoute(route hcnRoute, settings *uint16, result **uint16) (hr error) { + if hr = procHcnModifySdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnModifySdnRoute.Addr(), 3, uintptr(route), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnQueryRouteProperties(route hcnRoute, query string, properties **uint16, result **uint16) (hr error) { + var _p0 *uint16 + _p0, hr = syscall.UTF16PtrFromString(query) + if hr != nil { + return + } + return _hcnQueryRouteProperties(route, _p0, properties, result) +} + +func _hcnQueryRouteProperties(route hcnRoute, query *uint16, properties **uint16, result **uint16) (hr error) { + if hr = procHcnQuerySdnRouteProperties.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall6(procHcnQuerySdnRouteProperties.Addr(), 4, uintptr(route), uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) + if int32(r0) < 0 { + 
if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnDeleteRoute(id *_guid, result **uint16) (hr error) { + if hr = procHcnDeleteSdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnDeleteSdnRoute.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(result)), 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} + +func hcnCloseRoute(route hcnRoute) (hr error) { + if hr = procHcnCloseSdnRoute.Find(); hr != nil { + return + } + r0, _, _ := syscall.Syscall(procHcnCloseSdnRoute.Addr(), 1, uintptr(route), 0, 0) + if int32(r0) < 0 { + if r0&0x1fff0000 == 0x00070000 { + r0 &= 0xffff + } + hr = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go b/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go new file mode 100644 index 0000000000000..4a4fcea843f1c --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/cni/registry.go @@ -0,0 +1,110 @@ +package cni + +import ( + "errors" + + "github.com/Microsoft/go-winio/pkg/guid" + "github.com/Microsoft/hcsshim/internal/regstate" +) + +const ( + cniRoot = "cni" + cniKey = "cfg" +) + +// PersistedNamespaceConfig is the registry version of the `NamespaceID` to UVM +// map. +type PersistedNamespaceConfig struct { + namespaceID string + stored bool + + ContainerID string + HostUniqueID guid.GUID +} + +// NewPersistedNamespaceConfig creates an in-memory namespace config that can be +// persisted to the registry. +func NewPersistedNamespaceConfig(namespaceID, containerID string, containerHostUniqueID guid.GUID) *PersistedNamespaceConfig { + return &PersistedNamespaceConfig{ + namespaceID: namespaceID, + ContainerID: containerID, + HostUniqueID: containerHostUniqueID, + } +} + +// LoadPersistedNamespaceConfig loads a persisted config from the registry that matches +// `namespaceID`. 
If not found returns `regstate.NotFoundError` +func LoadPersistedNamespaceConfig(namespaceID string) (*PersistedNamespaceConfig, error) { + sk, err := regstate.Open(cniRoot, false) + if err != nil { + return nil, err + } + defer sk.Close() + + pnc := PersistedNamespaceConfig{ + namespaceID: namespaceID, + stored: true, + } + if err := sk.Get(namespaceID, cniKey, &pnc); err != nil { + return nil, err + } + return &pnc, nil +} + +// Store stores or updates the in-memory config to its registry state. If the +// store fails returns the store error. +func (pnc *PersistedNamespaceConfig) Store() error { + if pnc.namespaceID == "" { + return errors.New("invalid namespaceID ''") + } + if pnc.ContainerID == "" { + return errors.New("invalid containerID ''") + } + empty := guid.GUID{} + if pnc.HostUniqueID == empty { + return errors.New("invalid containerHostUniqueID 'empy'") + } + sk, err := regstate.Open(cniRoot, false) + if err != nil { + return err + } + defer sk.Close() + + if pnc.stored { + if err := sk.Set(pnc.namespaceID, cniKey, pnc); err != nil { + return err + } + } else { + if err := sk.Create(pnc.namespaceID, cniKey, pnc); err != nil { + return err + } + } + pnc.stored = true + return nil +} + +// Remove removes any persisted state associated with this config. If the config +// is not found in the registry `Remove` returns no error. 
+func (pnc *PersistedNamespaceConfig) Remove() error { + if pnc.stored { + sk, err := regstate.Open(cniRoot, false) + if err != nil { + if regstate.IsNotFoundError(err) { + pnc.stored = false + return nil + } + return err + } + defer sk.Close() + + if err := sk.Remove(pnc.namespaceID); err != nil { + if regstate.IsNotFoundError(err) { + pnc.stored = false + return nil + } + return err + } + } + pnc.stored = false + return nil +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go b/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go new file mode 100644 index 0000000000000..dcbc9334d7698 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/regstate/regstate.go @@ -0,0 +1,288 @@ +package regstate + +import ( + "encoding/json" + "fmt" + "net/url" + "os" + "path/filepath" + "reflect" + "syscall" + + "golang.org/x/sys/windows" + "golang.org/x/sys/windows/registry" +) + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go regstate.go + +//sys regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) = advapi32.RegCreateKeyExW + +const ( + _REG_OPTION_VOLATILE = 1 + + _REG_OPENED_EXISTING_KEY = 2 +) + +type Key struct { + registry.Key + Name string +} + +var localMachine = &Key{registry.LOCAL_MACHINE, "HKEY_LOCAL_MACHINE"} +var localUser = &Key{registry.CURRENT_USER, "HKEY_CURRENT_USER"} + +var rootPath = `SOFTWARE\Microsoft\runhcs` + +type NotFoundError struct { + ID string +} + +func (err *NotFoundError) Error() string { + return fmt.Sprintf("ID '%s' was not found", err.ID) +} + +func IsNotFoundError(err error) bool { + _, ok := err.(*NotFoundError) + return ok +} + +type NoStateError struct { + ID string + Key string +} + +func (err *NoStateError) Error() string { + return fmt.Sprintf("state '%s' is not present for ID '%s'", 
err.Key, err.ID) +} + +func createVolatileKey(k *Key, path string, access uint32) (newk *Key, openedExisting bool, err error) { + var ( + h syscall.Handle + d uint32 + ) + fullpath := filepath.Join(k.Name, path) + pathPtr, _ := windows.UTF16PtrFromString(path) + err = regCreateKeyEx(syscall.Handle(k.Key), pathPtr, 0, nil, _REG_OPTION_VOLATILE, access, nil, &h, &d) + if err != nil { + return nil, false, &os.PathError{Op: "RegCreateKeyEx", Path: fullpath, Err: err} + } + return &Key{registry.Key(h), fullpath}, d == _REG_OPENED_EXISTING_KEY, nil +} + +func hive(perUser bool) *Key { + r := localMachine + if perUser { + r = localUser + } + return r +} + +func Open(root string, perUser bool) (*Key, error) { + k, _, err := createVolatileKey(hive(perUser), rootPath, registry.ALL_ACCESS) + if err != nil { + return nil, err + } + defer k.Close() + + k2, _, err := createVolatileKey(k, url.PathEscape(root), registry.ALL_ACCESS) + if err != nil { + return nil, err + } + return k2, nil +} + +func RemoveAll(root string, perUser bool) error { + k, err := hive(perUser).open(rootPath) + if err != nil { + return err + } + defer k.Close() + r, err := k.open(url.PathEscape(root)) + if err != nil { + return err + } + defer r.Close() + ids, err := r.Enumerate() + if err != nil { + return err + } + for _, id := range ids { + err = r.Remove(id) + if err != nil { + return err + } + } + r.Close() + return k.Remove(root) +} + +func (k *Key) Close() error { + err := k.Key.Close() + k.Key = 0 + return err +} + +func (k *Key) Enumerate() ([]string, error) { + escapedIDs, err := k.ReadSubKeyNames(0) + if err != nil { + return nil, err + } + var ids []string + for _, e := range escapedIDs { + id, err := url.PathUnescape(e) + if err == nil { + ids = append(ids, id) + } + } + return ids, nil +} + +func (k *Key) open(name string) (*Key, error) { + fullpath := filepath.Join(k.Name, name) + nk, err := registry.OpenKey(k.Key, name, registry.ALL_ACCESS) + if err != nil { + return nil, &os.PathError{Op: 
"RegOpenKey", Path: fullpath, Err: err} + } + return &Key{nk, fullpath}, nil +} + +func (k *Key) openid(id string) (*Key, error) { + escaped := url.PathEscape(id) + fullpath := filepath.Join(k.Name, escaped) + nk, err := k.open(escaped) + if perr, ok := err.(*os.PathError); ok && perr.Err == syscall.ERROR_FILE_NOT_FOUND { + return nil, &NotFoundError{id} + } + if err != nil { + return nil, &os.PathError{Op: "RegOpenKey", Path: fullpath, Err: err} + } + return nk, nil +} + +func (k *Key) Remove(id string) error { + escaped := url.PathEscape(id) + err := registry.DeleteKey(k.Key, escaped) + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NotFoundError{id} + } + return &os.PathError{Op: "RegDeleteKey", Path: filepath.Join(k.Name, escaped), Err: err} + } + return nil +} + +func (k *Key) set(id string, create bool, key string, state interface{}) error { + var sk *Key + var err error + if create { + var existing bool + eid := url.PathEscape(id) + sk, existing, err = createVolatileKey(k, eid, registry.ALL_ACCESS) + if err != nil { + return err + } + defer sk.Close() + if existing { + sk.Close() + return fmt.Errorf("container %s already exists", id) + } + } else { + sk, err = k.openid(id) + if err != nil { + return err + } + defer sk.Close() + } + switch reflect.TypeOf(state).Kind() { + case reflect.Bool: + v := uint32(0) + if state.(bool) { + v = 1 + } + err = sk.SetDWordValue(key, v) + case reflect.Int: + err = sk.SetQWordValue(key, uint64(state.(int))) + case reflect.String: + err = sk.SetStringValue(key, state.(string)) + default: + var js []byte + js, err = json.Marshal(state) + if err != nil { + return err + } + err = sk.SetBinaryValue(key, js) + } + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NoStateError{id, key} + } + return &os.PathError{Op: "RegSetValueEx", Path: sk.Name + ":" + key, Err: err} + } + return nil +} + +func (k *Key) Create(id, key string, state interface{}) error { + return k.set(id, true, key, state) 
+} + +func (k *Key) Set(id, key string, state interface{}) error { + return k.set(id, false, key, state) +} + +func (k *Key) Clear(id, key string) error { + sk, err := k.openid(id) + if err != nil { + return err + } + defer sk.Close() + err = sk.DeleteValue(key) + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NoStateError{id, key} + } + return &os.PathError{Op: "RegDeleteValue", Path: sk.Name + ":" + key, Err: err} + } + return nil +} + +func (k *Key) Get(id, key string, state interface{}) error { + sk, err := k.openid(id) + if err != nil { + return err + } + defer sk.Close() + + var js []byte + switch reflect.TypeOf(state).Elem().Kind() { + case reflect.Bool: + var v uint64 + v, _, err = sk.GetIntegerValue(key) + if err == nil { + *state.(*bool) = v != 0 + } + case reflect.Int: + var v uint64 + v, _, err = sk.GetIntegerValue(key) + if err == nil { + *state.(*int) = int(v) + } + case reflect.String: + var v string + v, _, err = sk.GetStringValue(key) + if err == nil { + *state.(*string) = string(v) + } + default: + js, _, err = sk.GetBinaryValue(key) + } + if err != nil { + if err == syscall.ERROR_FILE_NOT_FOUND { + return &NoStateError{id, key} + } + return &os.PathError{Op: "RegQueryValueEx", Path: sk.Name + ":" + key, Err: err} + } + if js != nil { + err = json.Unmarshal(js, state) + } + return err +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go b/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go new file mode 100644 index 0000000000000..4e349ad498497 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/regstate/zsyscall_windows.go @@ -0,0 +1,51 @@ +// Code generated by 'go generate'; DO NOT EDIT. + +package regstate + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +var _ unsafe.Pointer + +// Do the interface allocations only once for common +// Errno values. 
+const ( + errnoERROR_IO_PENDING = 997 +) + +var ( + errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING) +) + +// errnoErr returns common boxed Errno values, to prevent +// allocations at runtime. +func errnoErr(e syscall.Errno) error { + switch e { + case 0: + return nil + case errnoERROR_IO_PENDING: + return errERROR_IO_PENDING + } + // TODO: add more here, after collecting data on the common + // error values see on Windows. (perhaps when running + // all.bat?) + return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + + procRegCreateKeyExW = modadvapi32.NewProc("RegCreateKeyExW") +) + +func regCreateKeyEx(key syscall.Handle, subkey *uint16, reserved uint32, class *uint16, options uint32, desired uint32, sa *syscall.SecurityAttributes, result *syscall.Handle, disposition *uint32) (regerrno error) { + r0, _, _ := syscall.Syscall9(procRegCreateKeyExW.Addr(), 9, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(reserved), uintptr(unsafe.Pointer(class)), uintptr(options), uintptr(desired), uintptr(unsafe.Pointer(sa)), uintptr(unsafe.Pointer(result)), uintptr(unsafe.Pointer(disposition))) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go new file mode 100644 index 0000000000000..a161c204e2faa --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/container.go @@ -0,0 +1,71 @@ +package runhcs + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "syscall" + "time" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +// ContainerState represents the platform agnostic pieces relating to a +// running container's status and state +type ContainerState struct { + // Version is the OCI version for the container + Version string `json:"ociVersion"` + // ID is the container ID + ID string `json:"id"` + // InitProcessPid is the init process id in the 
parent namespace + InitProcessPid int `json:"pid"` + // Status is the current status of the container, running, paused, ... + Status string `json:"status"` + // Bundle is the path on the filesystem to the bundle + Bundle string `json:"bundle"` + // Rootfs is a path to a directory containing the container's root filesystem. + Rootfs string `json:"rootfs"` + // Created is the unix timestamp for the creation time of the container in UTC + Created time.Time `json:"created"` + // Annotations is the user defined annotations added to the config. + Annotations map[string]string `json:"annotations,omitempty"` + // The owner of the state directory (the owner of the container). + Owner string `json:"owner"` +} + +// GetErrorFromPipe reads from `pipe` and verifies if the operation +// returned success or error. If error converts that to an error and returns. If +// `p` is not nil will issue a `Kill` and `Wait` for exit. +func GetErrorFromPipe(pipe io.Reader, p *os.Process) error { + serr, err := ioutil.ReadAll(pipe) + if err != nil { + return err + } + + if bytes.Equal(serr, ShimSuccess) { + return nil + } + + extra := "" + if p != nil { + _ = p.Kill() + state, err := p.Wait() + if err != nil { + panic(err) + } + extra = fmt.Sprintf(", exit code %d", state.Sys().(syscall.WaitStatus).ExitCode) + } + if len(serr) == 0 { + return fmt.Errorf("unknown shim failure%s", extra) + } + + return errors.New(string(serr)) +} + +// VMPipePath returns the named pipe path for the vm shim. 
+func VMPipePath(hostUniqueID guid.GUID) string { + return SafePipePath("runhcs-vm-" + hostUniqueID.String()) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go new file mode 100644 index 0000000000000..dcbb1903b8f00 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/util.go @@ -0,0 +1,16 @@ +package runhcs + +import "net/url" + +const ( + SafePipePrefix = `\\.\pipe\ProtectedPrefix\Administrators\` +) + +// ShimSuccess is the byte stream returned on a successful operation. +var ShimSuccess = []byte{0, 'O', 'K', 0} + +func SafePipePath(name string) string { + // Use a pipe in the Administrators protected prefixed to prevent malicious + // squatting. + return SafePipePrefix + url.PathEscape(name) +} diff --git a/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go new file mode 100644 index 0000000000000..2c8957b88df76 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/internal/runhcs/vm.go @@ -0,0 +1,43 @@ +package runhcs + +import ( + "encoding/json" + + "github.com/Microsoft/go-winio" +) + +// VMRequestOp is an operation that can be issued to a VM shim. +type VMRequestOp string + +const ( + // OpCreateContainer is a create container request. + OpCreateContainer VMRequestOp = "create" + // OpSyncNamespace is a `cni.NamespaceTypeGuest` sync request with the UVM. + OpSyncNamespace VMRequestOp = "sync" + // OpUnmountContainer is a container unmount request. + OpUnmountContainer VMRequestOp = "unmount" + // OpUnmountContainerDiskOnly is a container unmount disk request. + OpUnmountContainerDiskOnly VMRequestOp = "unmount-disk" +) + +// VMRequest is an operation request that is issued to a VM shim. +type VMRequest struct { + ID string + Op VMRequestOp +} + +// IssueVMRequest issues a request to a shim at the given pipe. 
+func IssueVMRequest(pipepath string, req *VMRequest) error { + pipe, err := winio.DialPipe(pipepath, nil) + if err != nil { + return err + } + defer pipe.Close() + if err := json.NewEncoder(pipe).Encode(req); err != nil { + return err + } + if err := GetErrorFromPipe(pipe, nil); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/containerd/go-cni/.gitignore b/vendor/github.com/containerd/go-cni/.gitignore new file mode 100644 index 0000000000000..04249514ed315 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/.gitignore @@ -0,0 +1,3 @@ +/bin/ +coverage.txt +profile.out diff --git a/vendor/github.com/containerd/go-cni/.golangci.yml b/vendor/github.com/containerd/go-cni/.golangci.yml new file mode 100644 index 0000000000000..673fd33a2f987 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/.golangci.yml @@ -0,0 +1,23 @@ +linters: + enable: + - structcheck + - varcheck + - staticcheck + - unconvert + - gofmt + - goimports + - revive + - ineffassign + - vet + - unused + - misspell + disable: + - errcheck + +# FIXME: re-enable after fixing GoDoc in this repository +#issues: +# include: +# - EXC0002 + +run: + timeout: 2m diff --git a/vendor/github.com/containerd/go-cni/LICENSE b/vendor/github.com/containerd/go-cni/LICENSE new file mode 100644 index 0000000000000..261eeb9e9f8b2 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/go-cni/Makefile b/vendor/github.com/containerd/go-cni/Makefile new file mode 100644 index 0000000000000..0b2edf770761c --- /dev/null +++ b/vendor/github.com/containerd/go-cni/Makefile @@ -0,0 +1,41 @@ +# Copyright The containerd Authors. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +TESTFLAGS_PARALLEL ?= 8 + +EXTRA_TESTFLAGS ?= + +# quiet or not +ifeq ($(V),1) + Q = +else + Q = @ +endif + +.PHONY: test integration clean help + +help: ## this help + @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) | sort + +test: ## run tests, except integration tests and tests that require root + $(Q)go test -v -race $(EXTRA_TESTFLAGS) -count=1 ./... + +integration: bin/integration.test ## run integration test + $(Q)bin/integration.test -test.v -test.count=1 -test.root $(EXTRA_TESTFLAGS) -test.parallel $(TESTFLAGS_PARALLEL) + +bin/integration.test: ## build integration test binary into bin + $(Q)cd ./integration && go test -race -c . -o ../bin/integration.test + +clean: ## clean up binaries + $(Q)rm -rf bin/ diff --git a/vendor/github.com/containerd/go-cni/README.md b/vendor/github.com/containerd/go-cni/README.md new file mode 100644 index 0000000000000..d028749f128f5 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/README.md @@ -0,0 +1,96 @@ +# go-cni + +[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/go-cni)](https://pkg.go.dev/github.com/containerd/go-cni) +[![Build Status](https://github.com/containerd/go-cni/workflows/CI/badge.svg)](https://github.com/containerd/go-cni/actions?query=workflow%3ACI) +[![codecov](https://codecov.io/gh/containerd/go-cni/branch/main/graph/badge.svg)](https://codecov.io/gh/containerd/go-cni) +[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/go-cni)](https://goreportcard.com/report/github.com/containerd/go-cni) + +A generic CNI library to provide APIs for CNI plugin interactions. 
The library provides APIs to: + +- Load CNI network config from different sources +- Setup networks for container namespace +- Remove networks from container namespace +- Query status of CNI network plugin initialization +- Check verifies the network is still in desired state + +go-cni aims to support plugins that implement [Container Network Interface](https://github.com/containernetworking/cni) + +## Usage +```go +package main + +import ( + "context" + "fmt" + "log" + + gocni "github.com/containerd/go-cni" +) + +func main() { + id := "example" + netns := "/var/run/netns/example-ns-1" + + // CNI allows multiple CNI configurations and the network interface + // will be named by eth0, eth1, ..., ethN. + ifPrefixName := "eth" + defaultIfName := "eth0" + + // Initializes library + l, err := gocni.New( + // one for loopback network interface + gocni.WithMinNetworkCount(2), + gocni.WithPluginConfDir("/etc/cni/net.d"), + gocni.WithPluginDir([]string{"/opt/cni/bin"}), + // Sets the prefix for network interfaces, eth by default + gocni.WithInterfacePrefix(ifPrefixName)) + if err != nil { + log.Fatalf("failed to initialize cni library: %v", err) + } + + // Load the cni configuration + if err := l.Load(gocni.WithLoNetwork, gocni.WithDefaultConf); err != nil { + log.Fatalf("failed to load cni configuration: %v", err) + } + + // Setup network for namespace. + labels := map[string]string{ + "K8S_POD_NAMESPACE": "namespace1", + "K8S_POD_NAME": "pod1", + "K8S_POD_INFRA_CONTAINER_ID": id, + // Plugin tolerates all Args embedded by unknown labels, like + // K8S_POD_NAMESPACE/NAME/INFRA_CONTAINER_ID... 
+ "IgnoreUnknown": "1", + } + + ctx := context.Background() + + // Teardown network + defer func() { + if err := l.Remove(ctx, id, netns, gocni.WithLabels(labels)); err != nil { + log.Fatalf("failed to teardown network: %v", err) + } + }() + + // Setup network + result, err := l.Setup(ctx, id, netns, gocni.WithLabels(labels)) + if err != nil { + log.Fatalf("failed to setup network for namespace: %v", err) + } + + // Get IP of the default interface + IP := result.Interfaces[defaultIfName].IPConfigs[0].IP.String() + fmt.Printf("IP of the default interface %s:%s", defaultIfName, IP) +} +``` + +## Project details + +The go-cni is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). +As a containerd sub-project, you will find the: + + * [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md), + * [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS), + * and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md) + +information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/go-cni/cni.go b/vendor/github.com/containerd/go-cni/cni.go new file mode 100644 index 0000000000000..b10af47ab602a --- /dev/null +++ b/vendor/github.com/containerd/go-cni/cni.go @@ -0,0 +1,312 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cni + +import ( + "context" + "fmt" + "os" + "strings" + "sync" + + cnilibrary "github.com/containernetworking/cni/libcni" + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/types" + types100 "github.com/containernetworking/cni/pkg/types/100" + "github.com/containernetworking/cni/pkg/version" +) + +type CNI interface { + // Setup setup the network for the namespace + Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) + // SetupSerially sets up each of the network interfaces for the namespace in serial + SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) + // Remove tears down the network of the namespace. + Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error + // Check checks if the network is still in desired state + Check(ctx context.Context, id string, path string, opts ...NamespaceOpts) error + // Load loads the cni network config + Load(opts ...Opt) error + // Status checks the status of the cni initialization + Status() error + // GetConfig returns a copy of the CNI plugin configurations as parsed by CNI + GetConfig() *ConfigResult +} + +type ConfigResult struct { + PluginDirs []string + PluginConfDir string + PluginMaxConfNum int + Prefix string + Networks []*ConfNetwork +} + +type ConfNetwork struct { + Config *NetworkConfList + IFName string +} + +// NetworkConfList is a source bytes to string version of cnilibrary.NetworkConfigList +type NetworkConfList struct { + Name string + CNIVersion string + Plugins []*NetworkConf + Source string +} + +// NetworkConf is a source bytes to string conversion of cnilibrary.NetworkConfig +type NetworkConf struct { + Network *types.NetConf + Source string +} + +type libcni struct { + config + + cniConfig cnilibrary.CNI + networkCount int // minimum network plugin configurations needed to initialize cni + networks []*Network + sync.RWMutex +} + +func 
defaultCNIConfig() *libcni { + return &libcni{ + config: config{ + pluginDirs: []string{DefaultCNIDir}, + pluginConfDir: DefaultNetDir, + pluginMaxConfNum: DefaultMaxConfNum, + prefix: DefaultPrefix, + }, + cniConfig: cnilibrary.NewCNIConfig( + []string{ + DefaultCNIDir, + }, + &invoke.DefaultExec{ + RawExec: &invoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: version.PluginDecoder{}, + }, + ), + networkCount: 1, + } +} + +// New creates a new libcni instance. +func New(config ...Opt) (CNI, error) { + cni := defaultCNIConfig() + var err error + for _, c := range config { + if err = c(cni); err != nil { + return nil, err + } + } + return cni, nil +} + +// Load loads the latest config from cni config files. +func (c *libcni) Load(opts ...Opt) error { + var err error + c.Lock() + defer c.Unlock() + // Reset the networks on a load operation to ensure + // config happens on a clean slate + c.reset() + + for _, o := range opts { + if err = o(c); err != nil { + return fmt.Errorf("cni config load failed: %v: %w", err, ErrLoad) + } + } + return nil +} + +// Status returns the status of CNI initialization. +func (c *libcni) Status() error { + c.RLock() + defer c.RUnlock() + if len(c.networks) < c.networkCount { + return ErrCNINotInitialized + } + return nil +} + +// Networks returns all the configured networks. +// NOTE: Caller MUST NOT modify anything in the returned array. +func (c *libcni) Networks() []*Network { + c.RLock() + defer c.RUnlock() + return append([]*Network{}, c.networks...) +} + +// Setup setups the network in the namespace and returns a Result +func (c *libcni) Setup(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) { + if err := c.Status(); err != nil { + return nil, err + } + ns, err := newNamespace(id, path, opts...) 
+ if err != nil { + return nil, err + } + result, err := c.attachNetworks(ctx, ns) + if err != nil { + return nil, err + } + return c.createResult(result) +} + +// SetupSerially setups the network in the namespace and returns a Result +func (c *libcni) SetupSerially(ctx context.Context, id string, path string, opts ...NamespaceOpts) (*Result, error) { + if err := c.Status(); err != nil { + return nil, err + } + ns, err := newNamespace(id, path, opts...) + if err != nil { + return nil, err + } + result, err := c.attachNetworksSerially(ctx, ns) + if err != nil { + return nil, err + } + return c.createResult(result) +} + +func (c *libcni) attachNetworksSerially(ctx context.Context, ns *Namespace) ([]*types100.Result, error) { + var results []*types100.Result + for _, network := range c.Networks() { + r, err := network.Attach(ctx, ns) + if err != nil { + return nil, err + } + results = append(results, r) + } + return results, nil +} + +type asynchAttachResult struct { + index int + res *types100.Result + err error +} + +func asynchAttach(ctx context.Context, index int, n *Network, ns *Namespace, wg *sync.WaitGroup, rc chan asynchAttachResult) { + defer wg.Done() + r, err := n.Attach(ctx, ns) + rc <- asynchAttachResult{index: index, res: r, err: err} +} + +func (c *libcni) attachNetworks(ctx context.Context, ns *Namespace) ([]*types100.Result, error) { + var wg sync.WaitGroup + var firstError error + results := make([]*types100.Result, len(c.Networks())) + rc := make(chan asynchAttachResult) + + for i, network := range c.Networks() { + wg.Add(1) + go asynchAttach(ctx, i, network, ns, &wg, rc) + } + + for range c.Networks() { + rs := <-rc + if rs.err != nil && firstError == nil { + firstError = rs.err + } + results[rs.index] = rs.res + } + wg.Wait() + + return results, firstError +} + +// Remove removes the network config from the namespace +func (c *libcni) Remove(ctx context.Context, id string, path string, opts ...NamespaceOpts) error { + if err := c.Status(); err != 
nil { + return err + } + ns, err := newNamespace(id, path, opts...) + if err != nil { + return err + } + for _, network := range c.Networks() { + if err := network.Remove(ctx, ns); err != nil { + // Based on CNI spec v0.7.0, empty network namespace is allowed to + // do best effort cleanup. However, it is not handled consistently + // right now: + // https://github.com/containernetworking/plugins/issues/210 + // TODO(random-liu): Remove the error handling when the issue is + // fixed and the CNI spec v0.6.0 support is deprecated. + // NOTE(claudiub): Some CNIs could return a "not found" error, which could mean that + // it was already deleted. + if (path == "" && strings.Contains(err.Error(), "no such file or directory")) || strings.Contains(err.Error(), "not found") { + continue + } + return err + } + } + return nil +} + +// Check checks if the network is still in desired state +func (c *libcni) Check(ctx context.Context, id string, path string, opts ...NamespaceOpts) error { + if err := c.Status(); err != nil { + return err + } + ns, err := newNamespace(id, path, opts...) 
+ if err != nil { + return err + } + for _, network := range c.Networks() { + err := network.Check(ctx, ns) + if err != nil { + return err + } + } + + return nil +} + +// GetConfig returns a copy of the CNI plugin configurations as parsed by CNI +func (c *libcni) GetConfig() *ConfigResult { + c.RLock() + defer c.RUnlock() + r := &ConfigResult{ + PluginDirs: c.config.pluginDirs, + PluginConfDir: c.config.pluginConfDir, + PluginMaxConfNum: c.config.pluginMaxConfNum, + Prefix: c.config.prefix, + } + for _, network := range c.networks { + conf := &NetworkConfList{ + Name: network.config.Name, + CNIVersion: network.config.CNIVersion, + Source: string(network.config.Bytes), + } + for _, plugin := range network.config.Plugins { + conf.Plugins = append(conf.Plugins, &NetworkConf{ + Network: plugin.Network, + Source: string(plugin.Bytes), + }) + } + r.Networks = append(r.Networks, &ConfNetwork{ + Config: conf, + IFName: network.ifName, + }) + } + return r +} + +func (c *libcni) reset() { + c.networks = nil +} diff --git a/vendor/github.com/containerd/go-cni/deprecated.go b/vendor/github.com/containerd/go-cni/deprecated.go new file mode 100644 index 0000000000000..06afd15432dec --- /dev/null +++ b/vendor/github.com/containerd/go-cni/deprecated.go @@ -0,0 +1,34 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cni + +import types100 "github.com/containernetworking/cni/pkg/types/100" + +// Deprecated: use cni.Opt instead +type CNIOpt = Opt //revive:disable // type name will be used as cni.CNIOpt by other packages, and that stutters + +// Deprecated: use cni.Result instead +type CNIResult = Result //revive:disable // type name will be used as cni.CNIResult by other packages, and that stutters + +// GetCNIResultFromResults creates a Result from the given slice of types100.Result, +// adding structured data containing the interface configuration for each of the +// interfaces created in the namespace. It returns an error if validation of +// results fails, or if a network could not be found. +// Deprecated: do not use +func (c *libcni) GetCNIResultFromResults(results []*types100.Result) (*Result, error) { + return c.createResult(results) +} diff --git a/vendor/github.com/containerd/go-cni/errors.go b/vendor/github.com/containerd/go-cni/errors.go new file mode 100644 index 0000000000000..9c670fec21d9c --- /dev/null +++ b/vendor/github.com/containerd/go-cni/errors.go @@ -0,0 +1,55 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cni + +import ( + "errors" +) + +var ( + ErrCNINotInitialized = errors.New("cni plugin not initialized") + ErrInvalidConfig = errors.New("invalid cni config") + ErrNotFound = errors.New("not found") + ErrRead = errors.New("failed to read config file") + ErrInvalidResult = errors.New("invalid result") + ErrLoad = errors.New("failed to load cni config") +) + +// IsCNINotInitialized returns true if the error is due to cni config not being initialized +func IsCNINotInitialized(err error) bool { + return errors.Is(err, ErrCNINotInitialized) +} + +// IsInvalidConfig returns true if the error is invalid cni config +func IsInvalidConfig(err error) bool { + return errors.Is(err, ErrInvalidConfig) +} + +// IsNotFound returns true if the error is due to a missing config or result +func IsNotFound(err error) bool { + return errors.Is(err, ErrNotFound) +} + +// IsReadFailure return true if the error is a config read failure +func IsReadFailure(err error) bool { + return errors.Is(err, ErrRead) +} + +// IsInvalidResult return true if the error is due to invalid cni result +func IsInvalidResult(err error) bool { + return errors.Is(err, ErrInvalidResult) +} diff --git a/vendor/github.com/containerd/go-cni/helper.go b/vendor/github.com/containerd/go-cni/helper.go new file mode 100644 index 0000000000000..9ebd5aae1ce71 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/helper.go @@ -0,0 +1,41 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cni + +import ( + "fmt" + + types100 "github.com/containernetworking/cni/pkg/types/100" +) + +func validateInterfaceConfig(ipConf *types100.IPConfig, ifs int) error { + if ipConf == nil { + return fmt.Errorf("invalid IP configuration (nil)") + } + if ipConf.Interface != nil && *ipConf.Interface > ifs { + return fmt.Errorf("invalid IP configuration (interface number %d is > number of interfaces %d)", *ipConf.Interface, ifs) + } + return nil +} + +func getIfName(prefix string, i int) string { + return fmt.Sprintf("%s%d", prefix, i) +} + +func defaultInterface(prefix string) string { + return getIfName(prefix, 0) +} diff --git a/vendor/github.com/containerd/go-cni/namespace.go b/vendor/github.com/containerd/go-cni/namespace.go new file mode 100644 index 0000000000000..319182bc05f43 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/namespace.go @@ -0,0 +1,81 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cni + +import ( + "context" + + cnilibrary "github.com/containernetworking/cni/libcni" + types100 "github.com/containernetworking/cni/pkg/types/100" +) + +type Network struct { + cni cnilibrary.CNI + config *cnilibrary.NetworkConfigList + ifName string +} + +func (n *Network) Attach(ctx context.Context, ns *Namespace) (*types100.Result, error) { + r, err := n.cni.AddNetworkList(ctx, n.config, ns.config(n.ifName)) + if err != nil { + return nil, err + } + return types100.NewResultFromResult(r) +} + +func (n *Network) Remove(ctx context.Context, ns *Namespace) error { + return n.cni.DelNetworkList(ctx, n.config, ns.config(n.ifName)) +} + +func (n *Network) Check(ctx context.Context, ns *Namespace) error { + return n.cni.CheckNetworkList(ctx, n.config, ns.config(n.ifName)) +} + +type Namespace struct { + id string + path string + capabilityArgs map[string]interface{} + args map[string]string +} + +func newNamespace(id, path string, opts ...NamespaceOpts) (*Namespace, error) { + ns := &Namespace{ + id: id, + path: path, + capabilityArgs: make(map[string]interface{}), + args: make(map[string]string), + } + for _, o := range opts { + if err := o(ns); err != nil { + return nil, err + } + } + return ns, nil +} + +func (ns *Namespace) config(ifName string) *cnilibrary.RuntimeConf { + c := &cnilibrary.RuntimeConf{ + ContainerID: ns.id, + NetNS: ns.path, + IfName: ifName, + } + for k, v := range ns.args { + c.Args = append(c.Args, [2]string{k, v}) + } + c.CapabilityArgs = ns.capabilityArgs + return c +} diff --git a/vendor/github.com/containerd/go-cni/namespace_opts.go b/vendor/github.com/containerd/go-cni/namespace_opts.go new file mode 100644 index 0000000000000..3387f6fd13113 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/namespace_opts.go @@ -0,0 +1,77 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cni + +type NamespaceOpts func(s *Namespace) error + +// WithCapabilityPortMap adds support for port mappings +func WithCapabilityPortMap(portMapping []PortMapping) NamespaceOpts { + return func(c *Namespace) error { + c.capabilityArgs["portMappings"] = portMapping + return nil + } +} + +// WithCapabilityIPRanges adds support for ip ranges +func WithCapabilityIPRanges(ipRanges []IPRanges) NamespaceOpts { + return func(c *Namespace) error { + c.capabilityArgs["ipRanges"] = ipRanges + return nil + } +} + +// WithCapabilityBandWitdh adds support for bandwidth limits +func WithCapabilityBandWidth(bandWidth BandWidth) NamespaceOpts { + return func(c *Namespace) error { + c.capabilityArgs["bandwidth"] = bandWidth + return nil + } +} + +// WithCapabilityDNS adds support for dns +func WithCapabilityDNS(dns DNS) NamespaceOpts { + return func(c *Namespace) error { + c.capabilityArgs["dns"] = dns + return nil + } +} + +// WithCapability support well-known capabilities +// https://www.cni.dev/docs/conventions/#well-known-capabilities +func WithCapability(name string, capability interface{}) NamespaceOpts { + return func(c *Namespace) error { + c.capabilityArgs[name] = capability + return nil + } +} + +// Args +func WithLabels(labels map[string]string) NamespaceOpts { + return func(c *Namespace) error { + for k, v := range labels { + c.args[k] = v + } + return nil + } +} + +func WithArgs(k, v string) NamespaceOpts { + return func(c *Namespace) error { + c.args[k] = v + return nil + } +} diff --git a/vendor/github.com/containerd/go-cni/opts.go 
b/vendor/github.com/containerd/go-cni/opts.go new file mode 100644 index 0000000000000..309d014ef1091 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/opts.go @@ -0,0 +1,273 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cni + +import ( + "fmt" + "os" + "sort" + "strings" + + cnilibrary "github.com/containernetworking/cni/libcni" + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/version" +) + +// Opt sets options for a CNI instance +type Opt func(c *libcni) error + +// WithInterfacePrefix sets the prefix for network interfaces +// e.g. eth or wlan +func WithInterfacePrefix(prefix string) Opt { + return func(c *libcni) error { + c.prefix = prefix + return nil + } +} + +// WithPluginDir can be used to set the locations of +// the cni plugin binaries +func WithPluginDir(dirs []string) Opt { + return func(c *libcni) error { + c.pluginDirs = dirs + c.cniConfig = cnilibrary.NewCNIConfig( + dirs, + &invoke.DefaultExec{ + RawExec: &invoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: version.PluginDecoder{}, + }, + ) + return nil + } +} + +// WithPluginConfDir can be used to configure the +// cni configuration directory. +func WithPluginConfDir(dir string) Opt { + return func(c *libcni) error { + c.pluginConfDir = dir + return nil + } +} + +// WithPluginMaxConfNum can be used to configure the +// max cni plugin config file num. 
+func WithPluginMaxConfNum(max int) Opt { + return func(c *libcni) error { + c.pluginMaxConfNum = max + return nil + } +} + +// WithMinNetworkCount can be used to configure the +// minimum networks to be configured and initialized +// for the status to report success. By default its 1. +func WithMinNetworkCount(count int) Opt { + return func(c *libcni) error { + c.networkCount = count + return nil + } +} + +// WithLoNetwork can be used to load the loopback +// network config. +func WithLoNetwork(c *libcni) error { + loConfig, _ := cnilibrary.ConfListFromBytes([]byte(`{ +"cniVersion": "0.3.1", +"name": "cni-loopback", +"plugins": [{ + "type": "loopback" +}] +}`)) + + c.networks = append(c.networks, &Network{ + cni: c.cniConfig, + config: loConfig, + ifName: "lo", + }) + return nil +} + +// WithConf can be used to load config directly +// from byte. +func WithConf(bytes []byte) Opt { + return WithConfIndex(bytes, 0) +} + +// WithConfIndex can be used to load config directly +// from byte and set the interface name's index. +func WithConfIndex(bytes []byte, index int) Opt { + return func(c *libcni) error { + conf, err := cnilibrary.ConfFromBytes(bytes) + if err != nil { + return err + } + confList, err := cnilibrary.ConfListFromConf(conf) + if err != nil { + return err + } + c.networks = append(c.networks, &Network{ + cni: c.cniConfig, + config: confList, + ifName: getIfName(c.prefix, index), + }) + return nil + } +} + +// WithConfFile can be used to load network config +// from an .conf file. Supported with absolute fileName +// with path only. 
+func WithConfFile(fileName string) Opt { + return func(c *libcni) error { + conf, err := cnilibrary.ConfFromFile(fileName) + if err != nil { + return err + } + // upconvert to conf list + confList, err := cnilibrary.ConfListFromConf(conf) + if err != nil { + return err + } + c.networks = append(c.networks, &Network{ + cni: c.cniConfig, + config: confList, + ifName: getIfName(c.prefix, 0), + }) + return nil + } +} + +// WithConfListBytes can be used to load network config list directly +// from byte +func WithConfListBytes(bytes []byte) Opt { + return func(c *libcni) error { + confList, err := cnilibrary.ConfListFromBytes(bytes) + if err != nil { + return err + } + i := len(c.networks) + c.networks = append(c.networks, &Network{ + cni: c.cniConfig, + config: confList, + ifName: getIfName(c.prefix, i), + }) + return nil + } +} + +// WithConfListFile can be used to load network config +// from an .conflist file. Supported with absolute fileName +// with path only. +func WithConfListFile(fileName string) Opt { + return func(c *libcni) error { + confList, err := cnilibrary.ConfListFromFile(fileName) + if err != nil { + return err + } + i := len(c.networks) + c.networks = append(c.networks, &Network{ + cni: c.cniConfig, + config: confList, + ifName: getIfName(c.prefix, i), + }) + return nil + } +} + +// WithDefaultConf can be used to detect the default network +// config file from the configured cni config directory and load +// it. +// Since the CNI spec does not specify a way to detect default networks, +// the convention chosen is - the first network configuration in the sorted +// list of network conf files as the default network. +func WithDefaultConf(c *libcni) error { + return loadFromConfDir(c, c.pluginMaxConfNum) +} + +// WithAllConf can be used to detect all network config +// files from the configured cni config directory and load +// them. 
+func WithAllConf(c *libcni) error { + return loadFromConfDir(c, 0) +} + +// loadFromConfDir detects network config files from the +// configured cni config directory and load them. max is +// the maximum network config to load (max i<= 0 means no limit). +func loadFromConfDir(c *libcni, max int) error { + files, err := cnilibrary.ConfFiles(c.pluginConfDir, []string{".conf", ".conflist", ".json"}) + switch { + case err != nil: + return fmt.Errorf("failed to read config file: %v: %w", err, ErrRead) + case len(files) == 0: + return fmt.Errorf("no network config found in %s: %w", c.pluginConfDir, ErrCNINotInitialized) + } + + // files contains the network config files associated with cni network. + // Use lexicographical way as a defined order for network config files. + sort.Strings(files) + // Since the CNI spec does not specify a way to detect default networks, + // the convention chosen is - the first network configuration in the sorted + // list of network conf files as the default network and choose the default + // interface provided during init as the network interface for this default + // network. For every other network use a generated interface id. + i := 0 + var networks []*Network + for _, confFile := range files { + var confList *cnilibrary.NetworkConfigList + if strings.HasSuffix(confFile, ".conflist") { + confList, err = cnilibrary.ConfListFromFile(confFile) + if err != nil { + return fmt.Errorf("failed to load CNI config list file %s: %v: %w", confFile, err, ErrInvalidConfig) + } + } else { + conf, err := cnilibrary.ConfFromFile(confFile) + if err != nil { + return fmt.Errorf("failed to load CNI config file %s: %v: %w", confFile, err, ErrInvalidConfig) + } + // Ensure the config has a "type" so we know what plugin to run. + // Also catches the case where somebody put a conflist into a conf file. 
+ if conf.Network.Type == "" { + return fmt.Errorf("network type not found in %s: %w", confFile, ErrInvalidConfig) + } + + confList, err = cnilibrary.ConfListFromConf(conf) + if err != nil { + return fmt.Errorf("failed to convert CNI config file %s to CNI config list: %v: %w", confFile, err, ErrInvalidConfig) + } + } + if len(confList.Plugins) == 0 { + return fmt.Errorf("CNI config list in config file %s has no networks, skipping: %w", confFile, ErrInvalidConfig) + + } + networks = append(networks, &Network{ + cni: c.cniConfig, + config: confList, + ifName: getIfName(c.prefix, i), + }) + i++ + if i == max { + break + } + } + if len(networks) == 0 { + return fmt.Errorf("no valid networks found in %s: %w", c.pluginDirs, ErrCNINotInitialized) + } + c.networks = append(c.networks, networks...) + return nil +} diff --git a/vendor/github.com/containerd/go-cni/result.go b/vendor/github.com/containerd/go-cni/result.go new file mode 100644 index 0000000000000..7bc115543bb09 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/result.go @@ -0,0 +1,114 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cni + +import ( + "fmt" + "net" + + "github.com/containernetworking/cni/pkg/types" + types100 "github.com/containernetworking/cni/pkg/types/100" +) + +type IPConfig struct { + IP net.IP + Gateway net.IP +} + +// Result contains the network information returned by CNI.Setup +// +// a) Interfaces list. 
Depending on the plugin, this can include the sandbox +// (eg, container or hypervisor) interface name and/or the host interface +// name, the hardware addresses of each interface, and details about the +// sandbox (if any) the interface is in. +// b) IP configuration assigned to each interface. The IPv4 and/or IPv6 addresses, +// gateways, and routes assigned to sandbox and/or host interfaces. +// c) DNS information. Dictionary that includes DNS information for nameservers, +// domain, search domains and options. +type Result struct { + Interfaces map[string]*Config + DNS []types.DNS + Routes []*types.Route + raw []*types100.Result +} + +// Raw returns the raw CNI results of multiple networks. +func (r *Result) Raw() []*types100.Result { + return r.raw +} + +type Config struct { + IPConfigs []*IPConfig + Mac string + Sandbox string +} + +// createResult creates a Result from the given slice of types100.Result, adding +// structured data containing the interface configuration for each of the +// interfaces created in the namespace. It returns an error if validation of +// results fails, or if a network could not be found. +func (c *libcni) createResult(results []*types100.Result) (*Result, error) { + c.RLock() + defer c.RUnlock() + r := &Result{ + Interfaces: make(map[string]*Config), + raw: results, + } + + // Plugins may not need to return Interfaces in result if + // if there are no multiple interfaces created. 
In that case + // all configs should be applied against default interface + r.Interfaces[defaultInterface(c.prefix)] = &Config{} + + // Walk through all the results + for _, result := range results { + // Walk through all the interface in each result + for _, intf := range result.Interfaces { + r.Interfaces[intf.Name] = &Config{ + Mac: intf.Mac, + Sandbox: intf.Sandbox, + } + } + // Walk through all the IPs in the result and attach it to corresponding + // interfaces + for _, ipConf := range result.IPs { + if err := validateInterfaceConfig(ipConf, len(result.Interfaces)); err != nil { + return nil, fmt.Errorf("invalid interface config: %v: %w", err, ErrInvalidResult) + } + name := c.getInterfaceName(result.Interfaces, ipConf) + r.Interfaces[name].IPConfigs = append(r.Interfaces[name].IPConfigs, + &IPConfig{IP: ipConf.Address.IP, Gateway: ipConf.Gateway}) + } + r.DNS = append(r.DNS, result.DNS) + r.Routes = append(r.Routes, result.Routes...) + } + if _, ok := r.Interfaces[defaultInterface(c.prefix)]; !ok { + return nil, fmt.Errorf("default network not found for: %s: %w", defaultInterface(c.prefix), ErrNotFound) + } + return r, nil +} + +// getInterfaceName returns the interface name if the plugins +// return the result with associated interfaces. If interface +// is not present then default interface name is used +func (c *libcni) getInterfaceName(interfaces []*types100.Interface, + ipConf *types100.IPConfig) string { + if ipConf.Interface != nil { + return interfaces[*ipConf.Interface].Name + } + return defaultInterface(c.prefix) +} diff --git a/vendor/github.com/containerd/go-cni/testutils.go b/vendor/github.com/containerd/go-cni/testutils.go new file mode 100644 index 0000000000000..d9453c8d983d9 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/testutils.go @@ -0,0 +1,78 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cni + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "testing" +) + +func makeTmpDir(prefix string) (string, error) { + tmpDir, err := ioutil.TempDir(os.TempDir(), prefix) + if err != nil { + return "", err + } + return tmpDir, nil +} + +func makeFakeCNIConfig(t *testing.T) (string, string) { + cniDir, err := makeTmpDir("fakecni") + if err != nil { + t.Fatalf("Failed to create plugin config dir: %v", err) + } + + cniConfDir := path.Join(cniDir, "net.d") + err = os.MkdirAll(cniConfDir, 0777) + if err != nil { + t.Fatalf("Failed to create network config dir: %v", err) + } + + networkConfig1 := path.Join(cniConfDir, "mocknetwork1.conf") + f1, err := os.Create(networkConfig1) + if err != nil { + t.Fatalf("Failed to create network config %v: %v", f1, err) + } + networkConfig2 := path.Join(cniConfDir, "mocknetwork2.conf") + f2, err := os.Create(networkConfig2) + if err != nil { + t.Fatalf("Failed to create network config %v: %v", f2, err) + } + + cfg1 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true} }`, "plugin1", "fakecni") + _, err = f1.WriteString(cfg1) + if err != nil { + t.Fatalf("Failed to write network config file %v: %v", f1, err) + } + f1.Close() + cfg2 := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true} }`, "plugin2", "fakecni") + _, err = f2.WriteString(cfg2) + if err != nil { + t.Fatalf("Failed to write network config file %v: %v", f2, err) + } + f2.Close() + return cniDir, cniConfDir +} + +func tearDownCNIConfig(t *testing.T, confDir string) { + err := os.RemoveAll(confDir) 
+ if err != nil { + t.Fatalf("Failed to cleanup CNI configs: %v", err) + } +} diff --git a/vendor/github.com/containerd/go-cni/types.go b/vendor/github.com/containerd/go-cni/types.go new file mode 100644 index 0000000000000..0b7db1ee0a637 --- /dev/null +++ b/vendor/github.com/containerd/go-cni/types.go @@ -0,0 +1,65 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package cni + +const ( + CNIPluginName = "cni" + DefaultNetDir = "/etc/cni/net.d" + DefaultCNIDir = "/opt/cni/bin" + DefaultMaxConfNum = 1 + VendorCNIDirTemplate = "%s/opt/%s/bin" + DefaultPrefix = "eth" +) + +type config struct { + pluginDirs []string + pluginConfDir string + pluginMaxConfNum int + prefix string +} + +type PortMapping struct { + HostPort int32 + ContainerPort int32 + Protocol string + HostIP string +} + +type IPRanges struct { + Subnet string + RangeStart string + RangeEnd string + Gateway string +} + +// BandWidth defines the ingress/egress rate and burst limits +type BandWidth struct { + IngressRate uint64 + IngressBurst uint64 + EgressRate uint64 + EgressBurst uint64 +} + +// DNS defines the dns config +type DNS struct { + // List of DNS servers of the cluster. + Servers []string + // List of DNS search domains of the cluster. + Searches []string + // List of DNS options. 
+ Options []string +} diff --git a/vendor/github.com/containernetworking/cni/LICENSE b/vendor/github.com/containernetworking/cni/LICENSE new file mode 100644 index 0000000000000..8f71f43fee3f7 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/containernetworking/cni/libcni/api.go b/vendor/github.com/containernetworking/cni/libcni/api.go new file mode 100644 index 0000000000000..0d82a2dd3c60b --- /dev/null +++ b/vendor/github.com/containernetworking/cni/libcni/api.go @@ -0,0 +1,679 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package libcni + +// Note this is the actual implementation of the CNI specification, which +// is reflected in the https://github.com/containernetworking/cni/blob/master/SPEC.md file +// it is typically bundled into runtime providers (i.e. containerd or cri-o would use this +// before calling runc or hcsshim). It is also bundled into CNI providers as well, for example, +// to add an IP to a container, to parse the configuration of the CNI and so on. + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/containernetworking/cni/pkg/invoke" + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/create" + "github.com/containernetworking/cni/pkg/utils" + "github.com/containernetworking/cni/pkg/version" +) + +var ( + CacheDir = "/var/lib/cni" +) + +const ( + CNICacheV1 = "cniCacheV1" +) + +// A RuntimeConf holds the arguments to one invocation of a CNI plugin +// excepting the network configuration, with the nested exception that +// the `runtimeConfig` from the network configuration is included +// here. +type RuntimeConf struct { + ContainerID string + NetNS string + IfName string + Args [][2]string + // A dictionary of capability-specific data passed by the runtime + // to plugins as top-level keys in the 'runtimeConfig' dictionary + // of the plugin's stdin data. libcni will ensure that only keys + // in this map which match the capabilities of the plugin are passed + // to the plugin + CapabilityArgs map[string]interface{} + + // DEPRECATED. Will be removed in a future release. 
+ CacheDir string +} + +type NetworkConfig struct { + Network *types.NetConf + Bytes []byte +} + +type NetworkConfigList struct { + Name string + CNIVersion string + DisableCheck bool + Plugins []*NetworkConfig + Bytes []byte +} + +type CNI interface { + AddNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + CheckNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + DelNetworkList(ctx context.Context, net *NetworkConfigList, rt *RuntimeConf) error + GetNetworkListCachedResult(net *NetworkConfigList, rt *RuntimeConf) (types.Result, error) + GetNetworkListCachedConfig(net *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) + + AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + DelNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error + GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) + GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) + + ValidateNetworkList(ctx context.Context, net *NetworkConfigList) ([]string, error) + ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) +} + +type CNIConfig struct { + Path []string + exec invoke.Exec + cacheDir string +} + +// CNIConfig implements the CNI interface +var _ CNI = &CNIConfig{} + +// NewCNIConfig returns a new CNIConfig object that will search for plugins +// in the given paths and use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. 
+func NewCNIConfig(path []string, exec invoke.Exec) *CNIConfig { + return NewCNIConfigWithCacheDir(path, "", exec) +} + +// NewCNIConfigWithCacheDir returns a new CNIConfig object that will search for plugins +// in the given paths use the given exec interface to run those plugins, +// or if the exec interface is not given, will use a default exec handler. +// The given cache directory will be used for temporary data storage when needed. +func NewCNIConfigWithCacheDir(path []string, cacheDir string, exec invoke.Exec) *CNIConfig { + return &CNIConfig{ + Path: path, + cacheDir: cacheDir, + exec: exec, + } +} + +func buildOneConfig(name, cniVersion string, orig *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + inject := map[string]interface{}{ + "name": name, + "cniVersion": cniVersion, + } + // Add previous plugin result + if prevResult != nil { + inject["prevResult"] = prevResult + } + + // Ensure every config uses the same name and version + orig, err = InjectConf(orig, inject) + if err != nil { + return nil, err + } + + return injectRuntimeConfig(orig, rt) +} + +// This function takes a libcni RuntimeConf structure and injects values into +// a "runtimeConfig" dictionary in the CNI network configuration JSON that +// will be passed to the plugin on stdin. +// +// Only "capabilities arguments" passed by the runtime are currently injected. +// These capabilities arguments are filtered through the plugin's advertised +// capabilities from its config JSON, and any keys in the CapabilityArgs +// matching plugin capabilities are added to the "runtimeConfig" dictionary +// sent to the plugin via JSON on stdin. For example, if the plugin's +// capabilities include "portMappings", and the CapabilityArgs map includes a +// "portMappings" key, that key and its value are added to the "runtimeConfig" +// dictionary to be passed to the plugin's stdin. 
+func injectRuntimeConfig(orig *NetworkConfig, rt *RuntimeConf) (*NetworkConfig, error) { + var err error + + rc := make(map[string]interface{}) + for capability, supported := range orig.Network.Capabilities { + if !supported { + continue + } + if data, ok := rt.CapabilityArgs[capability]; ok { + rc[capability] = data + } + } + + if len(rc) > 0 { + orig, err = InjectConf(orig, map[string]interface{}{"runtimeConfig": rc}) + if err != nil { + return nil, err + } + } + + return orig, nil +} + +// ensure we have a usable exec if the CNIConfig was not given one +func (c *CNIConfig) ensureExec() invoke.Exec { + if c.exec == nil { + c.exec = &invoke.DefaultExec{ + RawExec: &invoke.RawExec{Stderr: os.Stderr}, + PluginDecoder: version.PluginDecoder{}, + } + } + return c.exec +} + +type cachedInfo struct { + Kind string `json:"kind"` + ContainerID string `json:"containerId"` + Config []byte `json:"config"` + IfName string `json:"ifName"` + NetworkName string `json:"networkName"` + CniArgs [][2]string `json:"cniArgs,omitempty"` + CapabilityArgs map[string]interface{} `json:"capabilityArgs,omitempty"` + RawResult map[string]interface{} `json:"result,omitempty"` + Result types.Result `json:"-"` +} + +// getCacheDir returns the cache directory in this order: +// 1) global cacheDir from CNIConfig object +// 2) deprecated cacheDir from RuntimeConf object +// 3) fall back to default cache directory +func (c *CNIConfig) getCacheDir(rt *RuntimeConf) string { + if c.cacheDir != "" { + return c.cacheDir + } + if rt.CacheDir != "" { + return rt.CacheDir + } + return CacheDir +} + +func (c *CNIConfig) getCacheFilePath(netName string, rt *RuntimeConf) (string, error) { + if netName == "" || rt.ContainerID == "" || rt.IfName == "" { + return "", fmt.Errorf("cache file path requires network name (%q), container ID (%q), and interface name (%q)", netName, rt.ContainerID, rt.IfName) + } + return filepath.Join(c.getCacheDir(rt), "results", fmt.Sprintf("%s-%s-%s", netName, rt.ContainerID, 
rt.IfName)), nil +} + +func (c *CNIConfig) cacheAdd(result types.Result, config []byte, netName string, rt *RuntimeConf) error { + cached := cachedInfo{ + Kind: CNICacheV1, + ContainerID: rt.ContainerID, + Config: config, + IfName: rt.IfName, + NetworkName: netName, + CniArgs: rt.Args, + CapabilityArgs: rt.CapabilityArgs, + } + + // We need to get type.Result into cachedInfo as JSON map + // Marshal to []byte, then Unmarshal into cached.RawResult + data, err := json.Marshal(result) + if err != nil { + return err + } + + err = json.Unmarshal(data, &cached.RawResult) + if err != nil { + return err + } + + newBytes, err := json.Marshal(&cached) + if err != nil { + return err + } + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil { + return err + } + + return ioutil.WriteFile(fname, newBytes, 0600) +} + +func (c *CNIConfig) cacheDel(netName string, rt *RuntimeConf) error { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + // Ignore error + return nil + } + return os.Remove(fname) +} + +func (c *CNIConfig) getCachedConfig(netName string, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + var bytes []byte + + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, nil, err + } + bytes, err = ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil, nil + } + + unmarshaled := cachedInfo{} + if err := json.Unmarshal(bytes, &unmarshaled); err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal cached network %q config: %w", netName, err) + } + if unmarshaled.Kind != CNICacheV1 { + return nil, nil, fmt.Errorf("read cached network %q config has wrong kind: %v", netName, unmarshaled.Kind) + } + + newRt := *rt + if unmarshaled.CniArgs != nil { + newRt.Args = unmarshaled.CniArgs + } + newRt.CapabilityArgs = unmarshaled.CapabilityArgs + + return unmarshaled.Config, 
&newRt, nil +} + +func (c *CNIConfig) getLegacyCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + data, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + // Load the cached result + result, err := create.CreateFromBytes(data) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. + result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +func (c *CNIConfig) getCachedResult(netName, cniVersion string, rt *RuntimeConf) (types.Result, error) { + fname, err := c.getCacheFilePath(netName, rt) + if err != nil { + return nil, err + } + fdata, err := ioutil.ReadFile(fname) + if err != nil { + // Ignore read errors; the cached result may not exist on-disk + return nil, nil + } + + cachedInfo := cachedInfo{} + if err := json.Unmarshal(fdata, &cachedInfo); err != nil || cachedInfo.Kind != CNICacheV1 { + return c.getLegacyCachedResult(netName, cniVersion, rt) + } + + newBytes, err := json.Marshal(&cachedInfo.RawResult) + if err != nil { + return nil, fmt.Errorf("failed to marshal cached network %q config: %w", netName, err) + } + + // Load the cached result + result, err := create.CreateFromBytes(newBytes) + if err != nil { + return nil, err + } + + // Convert to the config version to ensure plugins get prevResult + // in the same version as the config. The cached result version + // should match the config version unless the config was changed + // while the container was running. 
+ result, err = result.GetAsVersion(cniVersion) + if err != nil { + return nil, fmt.Errorf("failed to convert cached result to config version %q: %w", cniVersion, err) + } + return result, nil +} + +// GetNetworkListCachedResult returns the cached Result of the previous +// AddNetworkList() operation for a network list, or an error. +func (c *CNIConfig) GetNetworkListCachedResult(list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(list.Name, list.CNIVersion, rt) +} + +// GetNetworkCachedResult returns the cached Result of the previous +// AddNetwork() operation for a network, or an error. +func (c *CNIConfig) GetNetworkCachedResult(net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + return c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) +} + +// GetNetworkListCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. +func (c *CNIConfig) GetNetworkListCachedConfig(list *NetworkConfigList, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(list.Name, rt) +} + +// GetNetworkCachedConfig copies the input RuntimeConf to output +// RuntimeConf with fields updated with info from the cached Config. 
+func (c *CNIConfig) GetNetworkCachedConfig(net *NetworkConfig, rt *RuntimeConf) ([]byte, *RuntimeConf, error) { + return c.getCachedConfig(net.Network.Name, rt) +} + +func (c *CNIConfig) addNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) (types.Result, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return nil, err + } + if err := utils.ValidateContainerID(rt.ContainerID); err != nil { + return nil, err + } + if err := utils.ValidateNetworkName(name); err != nil { + return nil, err + } + if err := utils.ValidateInterfaceName(rt.IfName); err != nil { + return nil, err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return nil, err + } + + return invoke.ExecPluginWithResult(ctx, pluginPath, newConf.Bytes, c.args("ADD", rt), c.exec) +} + +// AddNetworkList executes a sequence of plugins with the ADD command +func (c *CNIConfig) AddNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) (types.Result, error) { + var err error + var result types.Result + for _, net := range list.Plugins { + result, err = c.addNetwork(ctx, list.Name, list.CNIVersion, net, result, rt) + if err != nil { + return nil, fmt.Errorf("plugin %s failed (add): %w", pluginDescription(net.Network), err) + } + } + + if err = c.cacheAdd(result, list.Bytes, list.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", list.Name, err) + } + + return result, nil +} + +func (c *CNIConfig) checkNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return 
invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("CHECK", rt), c.exec) +} + +// CheckNetworkList executes a sequence of plugins with the CHECK command +func (c *CNIConfig) CheckNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", list.CNIVersion) + } + + if list.DisableCheck { + return nil + } + + cachedResult, err := c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", list.Name, err) + } + + for _, net := range list.Plugins { + if err := c.checkNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + } + + return nil +} + +func (c *CNIConfig) delNetwork(ctx context.Context, name, cniVersion string, net *NetworkConfig, prevResult types.Result, rt *RuntimeConf) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(net.Network.Type, c.Path) + if err != nil { + return err + } + + newConf, err := buildOneConfig(name, cniVersion, net, prevResult, rt) + if err != nil { + return err + } + + return invoke.ExecPluginWithoutResult(ctx, pluginPath, newConf.Bytes, c.args("DEL", rt), c.exec) +} + +// DelNetworkList executes a sequence of plugins with the DEL command +func (c *CNIConfig) DelNetworkList(ctx context.Context, list *NetworkConfigList, rt *RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(list.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(list.Name, list.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: 
%w", list.Name, err) + } + } + + for i := len(list.Plugins) - 1; i >= 0; i-- { + net := list.Plugins[i] + if err := c.delNetwork(ctx, list.Name, list.CNIVersion, net, cachedResult, rt); err != nil { + return fmt.Errorf("plugin %s failed (delete): %w", pluginDescription(net.Network), err) + } + } + _ = c.cacheDel(list.Name, rt) + + return nil +} + +func pluginDescription(net *types.NetConf) string { + if net == nil { + return "" + } + pluginType := net.Type + out := fmt.Sprintf("type=%q", pluginType) + name := net.Name + if name != "" { + out += fmt.Sprintf(" name=%q", name) + } + return out +} + +// AddNetwork executes the plugin with the ADD command +func (c *CNIConfig) AddNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) (types.Result, error) { + result, err := c.addNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, nil, rt) + if err != nil { + return nil, err + } + + if err = c.cacheAdd(result, net.Bytes, net.Network.Name, rt); err != nil { + return nil, fmt.Errorf("failed to set network %q cached result: %w", net.Network.Name, err) + } + + return result, nil +} + +// CheckNetwork executes the plugin with the CHECK command +func (c *CNIConfig) CheckNetwork(ctx context.Context, net *NetworkConfig, rt *RuntimeConf) error { + // CHECK was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if !gtet { + return fmt.Errorf("configuration version %q does not support the CHECK command", net.Network.CNIVersion) + } + + cachedResult, err := c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + return c.checkNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt) +} + +// DelNetwork executes the plugin with the DEL command +func (c *CNIConfig) DelNetwork(ctx context.Context, net *NetworkConfig, rt 
*RuntimeConf) error { + var cachedResult types.Result + + // Cached result on DEL was added in CNI spec version 0.4.0 and higher + if gtet, err := version.GreaterThanOrEqualTo(net.Network.CNIVersion, "0.4.0"); err != nil { + return err + } else if gtet { + cachedResult, err = c.getCachedResult(net.Network.Name, net.Network.CNIVersion, rt) + if err != nil { + return fmt.Errorf("failed to get network %q cached result: %w", net.Network.Name, err) + } + } + + if err := c.delNetwork(ctx, net.Network.Name, net.Network.CNIVersion, net, cachedResult, rt); err != nil { + return err + } + _ = c.cacheDel(net.Network.Name, rt) + return nil +} + +// ValidateNetworkList checks that a configuration is reasonably valid. +// - all the specified plugins exist on disk +// - every plugin supports the desired version. +// +// Returns a list of all capabilities supported by the configuration, or error +func (c *CNIConfig) ValidateNetworkList(ctx context.Context, list *NetworkConfigList) ([]string, error) { + version := list.CNIVersion + + // holding map for seen caps (in case of duplicates) + caps := map[string]interface{}{} + + errs := []error{} + for _, net := range list.Plugins { + if err := c.validatePlugin(ctx, net.Network.Type, version); err != nil { + errs = append(errs, err) + } + for c, enabled := range net.Network.Capabilities { + if !enabled { + continue + } + caps[c] = struct{}{} + } + } + + if len(errs) > 0 { + return nil, fmt.Errorf("%v", errs) + } + + // make caps list + cc := make([]string, 0, len(caps)) + for c := range caps { + cc = append(cc, c) + } + + return cc, nil +} + +// ValidateNetwork checks that a configuration is reasonably valid. 
+// It uses the same logic as ValidateNetworkList) +// Returns a list of capabilities +func (c *CNIConfig) ValidateNetwork(ctx context.Context, net *NetworkConfig) ([]string, error) { + caps := []string{} + for c, ok := range net.Network.Capabilities { + if ok { + caps = append(caps, c) + } + } + if err := c.validatePlugin(ctx, net.Network.Type, net.Network.CNIVersion); err != nil { + return nil, err + } + return caps, nil +} + +// validatePlugin checks that an individual plugin's configuration is sane +func (c *CNIConfig) validatePlugin(ctx context.Context, pluginName, expectedVersion string) error { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginName, c.Path) + if err != nil { + return err + } + if expectedVersion == "" { + expectedVersion = "0.1.0" + } + + vi, err := invoke.GetVersionInfo(ctx, pluginPath, c.exec) + if err != nil { + return err + } + for _, vers := range vi.SupportedVersions() { + if vers == expectedVersion { + return nil + } + } + return fmt.Errorf("plugin %s does not support config version %q", pluginName, expectedVersion) +} + +// GetVersionInfo reports which versions of the CNI spec are supported by +// the given plugin. 
+func (c *CNIConfig) GetVersionInfo(ctx context.Context, pluginType string) (version.PluginInfo, error) { + c.ensureExec() + pluginPath, err := c.exec.FindInPath(pluginType, c.Path) + if err != nil { + return nil, err + } + + return invoke.GetVersionInfo(ctx, pluginPath, c.exec) +} + +// ===== +func (c *CNIConfig) args(action string, rt *RuntimeConf) *invoke.Args { + return &invoke.Args{ + Command: action, + ContainerID: rt.ContainerID, + NetNS: rt.NetNS, + PluginArgs: rt.Args, + IfName: rt.IfName, + Path: strings.Join(c.Path, string(os.PathListSeparator)), + } +} diff --git a/vendor/github.com/containernetworking/cni/libcni/conf.go b/vendor/github.com/containernetworking/cni/libcni/conf.go new file mode 100644 index 0000000000000..3cd6a59d1c09a --- /dev/null +++ b/vendor/github.com/containernetworking/cni/libcni/conf.go @@ -0,0 +1,270 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package libcni + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + + "github.com/containernetworking/cni/pkg/types" +) + +type NotFoundError struct { + Dir string + Name string +} + +func (e NotFoundError) Error() string { + return fmt.Sprintf(`no net configuration with name "%s" in %s`, e.Name, e.Dir) +} + +type NoConfigsFoundError struct { + Dir string +} + +func (e NoConfigsFoundError) Error() string { + return fmt.Sprintf(`no net configurations found in %s`, e.Dir) +} + +func ConfFromBytes(bytes []byte) (*NetworkConfig, error) { + conf := &NetworkConfig{Bytes: bytes, Network: &types.NetConf{}} + if err := json.Unmarshal(bytes, conf.Network); err != nil { + return nil, fmt.Errorf("error parsing configuration: %w", err) + } + if conf.Network.Type == "" { + return nil, fmt.Errorf("error parsing configuration: missing 'type'") + } + return conf, nil +} + +func ConfFromFile(filename string) (*NetworkConfig, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfFromBytes(bytes) +} + +func ConfListFromBytes(bytes []byte) (*NetworkConfigList, error) { + rawList := make(map[string]interface{}) + if err := json.Unmarshal(bytes, &rawList); err != nil { + return nil, fmt.Errorf("error parsing configuration list: %w", err) + } + + rawName, ok := rawList["name"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no name") + } + name, ok := rawName.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid name type %T", rawName) + } + + var cniVersion string + rawVersion, ok := rawList["cniVersion"] + if ok { + cniVersion, ok = rawVersion.(string) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid cniVersion type %T", rawVersion) + } + } + + disableCheck := false + if rawDisableCheck, ok := rawList["disableCheck"]; ok { + disableCheck, ok = rawDisableCheck.(bool) + 
if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid disableCheck type %T", rawDisableCheck) + } + } + + list := &NetworkConfigList{ + Name: name, + DisableCheck: disableCheck, + CNIVersion: cniVersion, + Bytes: bytes, + } + + var plugins []interface{} + plug, ok := rawList["plugins"] + if !ok { + return nil, fmt.Errorf("error parsing configuration list: no 'plugins' key") + } + plugins, ok = plug.([]interface{}) + if !ok { + return nil, fmt.Errorf("error parsing configuration list: invalid 'plugins' type %T", plug) + } + if len(plugins) == 0 { + return nil, fmt.Errorf("error parsing configuration list: no plugins in list") + } + + for i, conf := range plugins { + newBytes, err := json.Marshal(conf) + if err != nil { + return nil, fmt.Errorf("failed to marshal plugin config %d: %w", i, err) + } + netConf, err := ConfFromBytes(newBytes) + if err != nil { + return nil, fmt.Errorf("failed to parse plugin config %d: %w", i, err) + } + list.Plugins = append(list.Plugins, netConf) + } + + return list, nil +} + +func ConfListFromFile(filename string) (*NetworkConfigList, error) { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("error reading %s: %w", filename, err) + } + return ConfListFromBytes(bytes) +} + +func ConfFiles(dir string, extensions []string) ([]string, error) { + // In part, adapted from rkt/networking/podenv.go#listFiles + files, err := ioutil.ReadDir(dir) + switch { + case err == nil: // break + case os.IsNotExist(err): + return nil, nil + default: + return nil, err + } + + confFiles := []string{} + for _, f := range files { + if f.IsDir() { + continue + } + fileExt := filepath.Ext(f.Name()) + for _, ext := range extensions { + if fileExt == ext { + confFiles = append(confFiles, filepath.Join(dir, f.Name())) + } + } + } + return confFiles, nil +} + +func LoadConf(dir, name string) (*NetworkConfig, error) { + files, err := ConfFiles(dir, []string{".conf", ".json"}) + switch { + case err != nil: + 
return nil, err + case len(files) == 0: + return nil, NoConfigsFoundError{Dir: dir} + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Network.Name == name { + return conf, nil + } + } + return nil, NotFoundError{dir, name} +} + +func LoadConfList(dir, name string) (*NetworkConfigList, error) { + files, err := ConfFiles(dir, []string{".conflist"}) + if err != nil { + return nil, err + } + sort.Strings(files) + + for _, confFile := range files { + conf, err := ConfListFromFile(confFile) + if err != nil { + return nil, err + } + if conf.Name == name { + return conf, nil + } + } + + // Try and load a network configuration file (instead of list) + // from the same name, then upconvert. + singleConf, err := LoadConf(dir, name) + if err != nil { + // A little extra logic so the error makes sense + if _, ok := err.(NoConfigsFoundError); len(files) != 0 && ok { + // Config lists found but no config files found + return nil, NotFoundError{dir, name} + } + + return nil, err + } + return ConfListFromConf(singleConf) +} + +func InjectConf(original *NetworkConfig, newValues map[string]interface{}) (*NetworkConfig, error) { + config := make(map[string]interface{}) + err := json.Unmarshal(original.Bytes, &config) + if err != nil { + return nil, fmt.Errorf("unmarshal existing network bytes: %w", err) + } + + for key, value := range newValues { + if key == "" { + return nil, fmt.Errorf("keys cannot be empty") + } + + if value == nil { + return nil, fmt.Errorf("key '%s' value must not be nil", key) + } + + config[key] = value + } + + newBytes, err := json.Marshal(config) + if err != nil { + return nil, err + } + + return ConfFromBytes(newBytes) +} + +// ConfListFromConf "upconverts" a network config in to a NetworkConfigList, +// with the single network as the only entry in the list. 
+func ConfListFromConf(original *NetworkConfig) (*NetworkConfigList, error) { + // Re-deserialize the config's json, then make a raw map configlist. + // This may seem a bit strange, but it's to make the Bytes fields + // actually make sense. Otherwise, the generated json is littered with + // golang default values. + + rawConfig := make(map[string]interface{}) + if err := json.Unmarshal(original.Bytes, &rawConfig); err != nil { + return nil, err + } + + rawConfigList := map[string]interface{}{ + "name": original.Network.Name, + "cniVersion": original.Network.CNIVersion, + "plugins": []interface{}{rawConfig}, + } + + b, err := json.Marshal(rawConfigList) + if err != nil { + return nil, err + } + return ConfListFromBytes(b) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/args.go b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go new file mode 100644 index 0000000000000..3cdb4bc8dadf2 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/args.go @@ -0,0 +1,128 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "fmt" + "os" + "strings" +) + +type CNIArgs interface { + // For use with os/exec; i.e., return nil to inherit the + // environment from this process + // For use in delegation; inherit the environment from this + // process and allow overrides + AsEnv() []string +} + +type inherited struct{} + +var inheritArgsFromEnv inherited + +func (*inherited) AsEnv() []string { + return nil +} + +func ArgsFromEnv() CNIArgs { + return &inheritArgsFromEnv +} + +type Args struct { + Command string + ContainerID string + NetNS string + PluginArgs [][2]string + PluginArgsStr string + IfName string + Path string +} + +// Args implements the CNIArgs interface +var _ CNIArgs = &Args{} + +func (args *Args) AsEnv() []string { + env := os.Environ() + pluginArgsStr := args.PluginArgsStr + if pluginArgsStr == "" { + pluginArgsStr = stringify(args.PluginArgs) + } + + // Duplicated values which come first will be overridden, so we must put the + // custom values in the end to avoid being overridden by the process environments. + env = append(env, + "CNI_COMMAND="+args.Command, + "CNI_CONTAINERID="+args.ContainerID, + "CNI_NETNS="+args.NetNS, + "CNI_ARGS="+pluginArgsStr, + "CNI_IFNAME="+args.IfName, + "CNI_PATH="+args.Path, + ) + return dedupEnv(env) +} + +// taken from rkt/networking/net_plugin.go +func stringify(pluginArgs [][2]string) string { + entries := make([]string, len(pluginArgs)) + + for i, kv := range pluginArgs { + entries[i] = strings.Join(kv[:], "=") + } + + return strings.Join(entries, ";") +} + +// DelegateArgs implements the CNIArgs interface +// used for delegation to inherit from environments +// and allow some overrides like CNI_COMMAND +var _ CNIArgs = &DelegateArgs{} + +type DelegateArgs struct { + Command string +} + +func (d *DelegateArgs) AsEnv() []string { + env := os.Environ() + + // The custom values should come in the end to override the existing + // process environment of the same key. 
+ env = append(env, + "CNI_COMMAND="+d.Command, + ) + return dedupEnv(env) +} + +// dedupEnv returns a copy of env with any duplicates removed, in favor of later values. +// Items not of the normal environment "key=value" form are preserved unchanged. +func dedupEnv(env []string) []string { + out := make([]string, 0, len(env)) + envMap := map[string]string{} + + for _, kv := range env { + // find the first "=" in environment, if not, just keep it + eq := strings.Index(kv, "=") + if eq < 0 { + out = append(out, kv) + continue + } + envMap[kv[:eq]] = kv[eq+1:] + } + + for k, v := range envMap { + out = append(out, fmt.Sprintf("%s=%s", k, v)) + } + + return out +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go new file mode 100644 index 0000000000000..8defe4dd39825 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/delegate.go @@ -0,0 +1,80 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "context" + "os" + "path/filepath" + + "github.com/containernetworking/cni/pkg/types" +) + +func delegateCommon(delegatePlugin string, exec Exec) (string, Exec, error) { + if exec == nil { + exec = defaultExec + } + + paths := filepath.SplitList(os.Getenv("CNI_PATH")) + pluginPath, err := exec.FindInPath(delegatePlugin, paths) + if err != nil { + return "", nil, err + } + + return pluginPath, exec, nil +} + +// DelegateAdd calls the given delegate plugin with the CNI ADD action and +// JSON configuration +func DelegateAdd(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) (types.Result, error) { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return nil, err + } + + // DelegateAdd will override the original "CNI_COMMAND" env from process with ADD + return ExecPluginWithResult(ctx, pluginPath, netconf, delegateArgs("ADD"), realExec) +} + +// DelegateCheck calls the given delegate plugin with the CNI CHECK action and +// JSON configuration +func DelegateCheck(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateCheck will override the original CNI_COMMAND env from process with CHECK + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("CHECK"), realExec) +} + +// DelegateDel calls the given delegate plugin with the CNI DEL action and +// JSON configuration +func DelegateDel(ctx context.Context, delegatePlugin string, netconf []byte, exec Exec) error { + pluginPath, realExec, err := delegateCommon(delegatePlugin, exec) + if err != nil { + return err + } + + // DelegateDel will override the original CNI_COMMAND env from process with DEL + return ExecPluginWithoutResult(ctx, pluginPath, netconf, delegateArgs("DEL"), realExec) +} + +// return CNIArgs used by delegation +func delegateArgs(action string) *DelegateArgs { + 
return &DelegateArgs{ + Command: action, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go new file mode 100644 index 0000000000000..55ed392a016f9 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/exec.go @@ -0,0 +1,181 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +import ( + "context" + "encoding/json" + "fmt" + "os" + + "github.com/containernetworking/cni/pkg/types" + "github.com/containernetworking/cni/pkg/types/create" + "github.com/containernetworking/cni/pkg/version" +) + +// Exec is an interface encapsulates all operations that deal with finding +// and executing a CNI plugin. Tests may provide a fake implementation +// to avoid writing fake plugins to temporary directories during the test. +type Exec interface { + ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) + FindInPath(plugin string, paths []string) (string, error) + Decode(jsonBytes []byte) (version.PluginInfo, error) +} + +// Plugin must return result in same version as specified in netconf; but +// for backwards compatibility reasons if the result version is empty use +// config version (rather than technically correct 0.1.0). 
+// https://github.com/containernetworking/cni/issues/895 +func fixupResultVersion(netconf, result []byte) (string, []byte, error) { + versionDecoder := &version.ConfigDecoder{} + confVersion, err := versionDecoder.Decode(netconf) + if err != nil { + return "", nil, err + } + + var rawResult map[string]interface{} + if err := json.Unmarshal(result, &rawResult); err != nil { + return "", nil, fmt.Errorf("failed to unmarshal raw result: %w", err) + } + + // Manually decode Result version; we need to know whether its cniVersion + // is empty, while built-in decoders (correctly) substitute 0.1.0 for an + // empty version per the CNI spec. + if resultVerRaw, ok := rawResult["cniVersion"]; ok { + resultVer, ok := resultVerRaw.(string) + if ok && resultVer != "" { + return resultVer, result, nil + } + } + + // If the cniVersion is not present or empty, assume the result is + // the same CNI spec version as the config + rawResult["cniVersion"] = confVersion + newBytes, err := json.Marshal(rawResult) + if err != nil { + return "", nil, fmt.Errorf("failed to remarshal fixed result: %w", err) + } + + return confVersion, newBytes, nil +} + +// For example, a testcase could pass an instance of the following fakeExec +// object to ExecPluginWithResult() to verify the incoming stdin and environment +// and provide a tailored response: +// +//import ( +// "encoding/json" +// "path" +// "strings" +//) +// +//type fakeExec struct { +// version.PluginDecoder +//} +// +//func (f *fakeExec) ExecPlugin(pluginPath string, stdinData []byte, environ []string) ([]byte, error) { +// net := &types.NetConf{} +// err := json.Unmarshal(stdinData, net) +// if err != nil { +// return nil, fmt.Errorf("failed to unmarshal configuration: %v", err) +// } +// pluginName := path.Base(pluginPath) +// if pluginName != net.Type { +// return nil, fmt.Errorf("plugin name %q did not match config type %q", pluginName, net.Type) +// } +// for _, e := range environ { +// // Check environment for forced failure 
request +// parts := strings.Split(e, "=") +// if len(parts) > 0 && parts[0] == "FAIL" { +// return nil, fmt.Errorf("failed to execute plugin %s", pluginName) +// } +// } +// return []byte("{\"CNIVersion\":\"0.4.0\"}"), nil +//} +// +//func (f *fakeExec) FindInPath(plugin string, paths []string) (string, error) { +// if len(paths) > 0 { +// return path.Join(paths[0], plugin), nil +// } +// return "", fmt.Errorf("failed to find plugin %s in paths %v", plugin, paths) +//} + +func ExecPluginWithResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) (types.Result, error) { + if exec == nil { + exec = defaultExec + } + + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + if err != nil { + return nil, err + } + + resultVersion, fixedBytes, err := fixupResultVersion(netconf, stdoutBytes) + if err != nil { + return nil, err + } + + return create.Create(resultVersion, fixedBytes) +} + +func ExecPluginWithoutResult(ctx context.Context, pluginPath string, netconf []byte, args CNIArgs, exec Exec) error { + if exec == nil { + exec = defaultExec + } + _, err := exec.ExecPlugin(ctx, pluginPath, netconf, args.AsEnv()) + return err +} + +// GetVersionInfo returns the version information available about the plugin. +// For recent-enough plugins, it uses the information returned by the VERSION +// command. 
For older plugins which do not recognize that command, it reports +// version 0.1.0 +func GetVersionInfo(ctx context.Context, pluginPath string, exec Exec) (version.PluginInfo, error) { + if exec == nil { + exec = defaultExec + } + args := &Args{ + Command: "VERSION", + + // set fake values required by plugins built against an older version of skel + NetNS: "dummy", + IfName: "dummy", + Path: "dummy", + } + stdin := []byte(fmt.Sprintf(`{"cniVersion":%q}`, version.Current())) + stdoutBytes, err := exec.ExecPlugin(ctx, pluginPath, stdin, args.AsEnv()) + if err != nil { + if err.Error() == "unknown CNI_COMMAND: VERSION" { + return version.PluginSupports("0.1.0"), nil + } + return nil, err + } + + return exec.Decode(stdoutBytes) +} + +// DefaultExec is an object that implements the Exec interface which looks +// for and executes plugins from disk. +type DefaultExec struct { + *RawExec + version.PluginDecoder +} + +// DefaultExec implements the Exec interface +var _ Exec = &DefaultExec{} + +var defaultExec = &DefaultExec{ + RawExec: &RawExec{Stderr: os.Stderr}, +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/find.go b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go new file mode 100644 index 0000000000000..e62029eb788b1 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/find.go @@ -0,0 +1,48 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "fmt" + "os" + "path/filepath" + "strings" +) + +// FindInPath returns the full path of the plugin by searching in the provided path +func FindInPath(plugin string, paths []string) (string, error) { + if plugin == "" { + return "", fmt.Errorf("no plugin name provided") + } + + if strings.ContainsRune(plugin, os.PathSeparator) { + return "", fmt.Errorf("invalid plugin name: %s", plugin) + } + + if len(paths) == 0 { + return "", fmt.Errorf("no paths provided") + } + + for _, path := range paths { + for _, fe := range ExecutableFileExtensions { + fullpath := filepath.Join(path, plugin) + fe + if fi, err := os.Stat(fullpath); err == nil && fi.Mode().IsRegular() { + return fullpath, nil + } + } + } + + return "", fmt.Errorf("failed to find plugin %q in path %s", plugin, paths) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go new file mode 100644 index 0000000000000..9bcfb4553677c --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_unix.go @@ -0,0 +1,20 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package invoke + +// Valid file extensions for plugin executables. 
+var ExecutableFileExtensions = []string{""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go new file mode 100644 index 0000000000000..7665125b133c6 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/os_windows.go @@ -0,0 +1,18 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package invoke + +// Valid file extensions for plugin executables. +var ExecutableFileExtensions = []string{".exe", ""} diff --git a/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go new file mode 100644 index 0000000000000..5ab5cc8857627 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/invoke/raw_exec.go @@ -0,0 +1,88 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package invoke + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "os/exec" + "strings" + "time" + + "github.com/containernetworking/cni/pkg/types" +) + +type RawExec struct { + Stderr io.Writer +} + +func (e *RawExec) ExecPlugin(ctx context.Context, pluginPath string, stdinData []byte, environ []string) ([]byte, error) { + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + c := exec.CommandContext(ctx, pluginPath) + c.Env = environ + c.Stdin = bytes.NewBuffer(stdinData) + c.Stdout = stdout + c.Stderr = stderr + + // Retry the command on "text file busy" errors + for i := 0; i <= 5; i++ { + err := c.Run() + + // Command succeeded + if err == nil { + break + } + + // If the plugin is currently about to be written, then we wait a + // second and try it again + if strings.Contains(err.Error(), "text file busy") { + time.Sleep(time.Second) + continue + } + + // All other errors except than the busy text file + return nil, e.pluginErr(err, stdout.Bytes(), stderr.Bytes()) + } + + // Copy stderr to caller's buffer in case plugin printed to both + // stdout and stderr for some reason. Ignore failures as stderr is + // only informational. 
+ if e.Stderr != nil && stderr.Len() > 0 { + _, _ = stderr.WriteTo(e.Stderr) + } + return stdout.Bytes(), nil +} + +func (e *RawExec) pluginErr(err error, stdout, stderr []byte) error { + emsg := types.Error{} + if len(stdout) == 0 { + if len(stderr) == 0 { + emsg.Msg = fmt.Sprintf("netplugin failed with no error message: %v", err) + } else { + emsg.Msg = fmt.Sprintf("netplugin failed: %q", string(stderr)) + } + } else if perr := json.Unmarshal(stdout, &emsg); perr != nil { + emsg.Msg = fmt.Sprintf("netplugin failed but error parsing its diagnostic message %q: %v", string(stdout), perr) + } + return &emsg +} + +func (e *RawExec) FindInPath(plugin string, paths []string) (string, error) { + return FindInPath(plugin, paths) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/020/types.go b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go new file mode 100644 index 0000000000000..99b151ff2409f --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/020/types.go @@ -0,0 +1,189 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types020 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.2.0" + +var supportedVersions = []string{"", "0.1.0", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + convert.RegisterConverter("0.1.0", []string{ImplementedSpecVersion}, convertFrom010) + convert.RegisterConverter(ImplementedSpecVersion, []string{"0.1.0"}, convertTo010) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +// Compatibility types for CNI version 0.1.0 and 0.2.0 + +// NewResult creates a new Result object from JSON data. The JSON data +// must be compatible with the CNI versions implemented by this type. +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + if result.CNIVersion == "" { + result.CNIVersion = "0.1.0" + } + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +// GetResult converts the given Result object to the ImplementedSpecVersion +// and returns the concrete type or an error +func GetResult(r types.Result) (*Result, error) { + result020, err := convert.Convert(r, ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := result020.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func convertFrom010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.2.0" { + panic("only converts to version 0.2.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: ImplementedSpecVersion, + IP4: fromResult.IP4.Copy(), + 
IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +func convertTo010(from types.Result, toVersion string) (types.Result, error) { + if toVersion != "0.1.0" { + panic("only converts to version 0.1.0") + } + fromResult := from.(*Result) + return &Result{ + CNIVersion: "0.1.0", + IP4: fromResult.IP4.Copy(), + IP6: fromResult.IP6.Copy(), + DNS: *fromResult.DNS.Copy(), + }, nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + IP4 *IPConfig `json:"ip4,omitempty"` + IP6 *IPConfig `json:"ip6,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// IPConfig contains values necessary to configure an interface +type IPConfig struct { + IP net.IPNet + Gateway net.IP + Routes []types.Route +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + var routes []types.Route + for _, fromRoute := range i.Routes { + routes = append(routes, *fromRoute.Copy()) + } + return &IPConfig{ + IP: i.IP, + Gateway: i.Gateway, + Routes: routes, + } +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type ipConfig struct { + IP types.IPNet `json:"ip"` + Gateway net.IP `json:"gateway,omitempty"` + Routes []types.Route 
`json:"routes,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + IP: types.IPNet(c.IP), + Gateway: c.Gateway, + Routes: c.Routes, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.IP = net.IPNet(ipc.IP) + c.Gateway = ipc.Gateway + c.Routes = ipc.Routes + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/040/types.go b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go new file mode 100644 index 0000000000000..3633b0eaa3a88 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/040/types.go @@ -0,0 +1,306 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types040 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types020 "github.com/containernetworking/cni/pkg/types/020" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "0.4.0" + +var supportedVersions = []string{"0.3.0", "0.3.1", ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertInternal) + convert.RegisterConverter("0.3.1", supportedVersions, convertInternal) + + // Down-converters + convert.RegisterConverter("0.4.0", []string{"0.3.0", "0.3.1"}, convertInternal) + convert.RegisterConverter("0.4.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.1", []string{"0.1.0", "0.2.0"}, convertTo02x) + convert.RegisterConverter("0.3.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, 
ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convert020IPConfig(from *types020.IPConfig, ipVersion string) *IPConfig { + return &IPConfig{ + Version: ipVersion, + Address: from.IP, + Gateway: from.Gateway, + } +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types020.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + if fromResult.IP4 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP4, "4")) + for _, fromRoute := range fromResult.IP4.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + if fromResult.IP6 != nil { + toResult.IPs = append(toResult.IPs, convert020IPConfig(fromResult.IP6, "6")) + for _, fromRoute := range fromResult.IP6.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + } + + return toResult, nil +} + +func convertInternal(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, fromIntf.Copy()) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, fromIPC.Copy()) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) 
(types.Result, error) { + fromResult := from.(*Result) + toResult := &types020.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + } + + for _, fromIP := range fromResult.IPs { + // Only convert the first IP address of each version as 0.2.0 + // and earlier cannot handle multiple IP addresses + if fromIP.Version == "4" && toResult.IP4 == nil { + toResult.IP4 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } else if fromIP.Version == "6" && toResult.IP6 == nil { + toResult.IP6 = &types020.IPConfig{ + IP: fromIP.Address, + Gateway: fromIP.Gateway, + } + } + if toResult.IP4 != nil && toResult.IP6 != nil { + break + } + } + + for _, fromRoute := range fromResult.Routes { + is4 := fromRoute.Dst.IP.To4() != nil + if is4 && toResult.IP4 != nil { + toResult.IP4.Routes = append(toResult.IP4.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } else if !is4 && toResult.IP6 != nil { + toResult.IP6.Routes = append(toResult.IP6.Routes, types.Route{ + Dst: fromRoute.Dst, + GW: fromRoute.GW, + }) + } + } + + // 0.2.0 and earlier require at least one IP address in the Result + if toResult.IP4 == nil && toResult.IP6 == nil { + return nil, fmt.Errorf("cannot convert: no valid IP addresses") + } + + return toResult, nil +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface 
struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. +func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // IP version, either "4" or "6" + Version string + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Version: i.Version, + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Version string `json:"version"` + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Version: c.Version, + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Version = ipc.Version + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/100/types.go b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go new file mode 100644 index 0000000000000..0e1e8b857b78d --- /dev/null +++ 
b/vendor/github.com/containernetworking/cni/pkg/types/100/types.go @@ -0,0 +1,307 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types100 + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" + + "github.com/containernetworking/cni/pkg/types" + types040 "github.com/containernetworking/cni/pkg/types/040" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +const ImplementedSpecVersion string = "1.0.0" + +var supportedVersions = []string{ImplementedSpecVersion} + +// Register converters for all versions less than the implemented spec version +func init() { + // Up-converters + convert.RegisterConverter("0.1.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.2.0", supportedVersions, convertFrom02x) + convert.RegisterConverter("0.3.0", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.3.1", supportedVersions, convertFrom04x) + convert.RegisterConverter("0.4.0", supportedVersions, convertFrom04x) + + // Down-converters + convert.RegisterConverter("1.0.0", []string{"0.3.0", "0.3.1", "0.4.0"}, convertTo04x) + convert.RegisterConverter("1.0.0", []string{"0.1.0", "0.2.0"}, convertTo02x) + + // Creator + convert.RegisterCreator(supportedVersions, NewResult) +} + +func NewResult(data []byte) (types.Result, error) { + result := &Result{} + if err := json.Unmarshal(data, result); err != nil { + return nil, err + } + for _, v := range supportedVersions { + if 
result.CNIVersion == v { + return result, nil + } + } + return nil, fmt.Errorf("result type supports %v but unmarshalled CNIVersion is %q", + supportedVersions, result.CNIVersion) +} + +func GetResult(r types.Result) (*Result, error) { + resultCurrent, err := r.GetAsVersion(ImplementedSpecVersion) + if err != nil { + return nil, err + } + result, ok := resultCurrent.(*Result) + if !ok { + return nil, fmt.Errorf("failed to convert result") + } + return result, nil +} + +func NewResultFromResult(result types.Result) (*Result, error) { + newResult, err := convert.Convert(result, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return newResult.(*Result), nil +} + +// Result is what gets returned from the plugin (via stdout) to the caller +type Result struct { + CNIVersion string `json:"cniVersion,omitempty"` + Interfaces []*Interface `json:"interfaces,omitempty"` + IPs []*IPConfig `json:"ips,omitempty"` + Routes []*types.Route `json:"routes,omitempty"` + DNS types.DNS `json:"dns,omitempty"` +} + +func convertFrom02x(from types.Result, toVersion string) (types.Result, error) { + result040, err := convert.Convert(from, "0.4.0") + if err != nil { + return nil, err + } + result100, err := convertFrom04x(result040, ImplementedSpecVersion) + if err != nil { + return nil, err + } + return result100, nil +} + +func convertIPConfigFrom040(from *types040.IPConfig) *IPConfig { + to := &IPConfig{ + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceFrom040(from *types040.Interface) *Interface { + return &Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertFrom04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*types040.Result) + toResult := &Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range 
fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceFrom040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigFrom040(fromIPC)) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertIPConfigTo040(from *IPConfig) *types040.IPConfig { + version := "6" + if from.Address.IP.To4() != nil { + version = "4" + } + to := &types040.IPConfig{ + Version: version, + Address: from.Address, + Gateway: from.Gateway, + } + if from.Interface != nil { + intf := *from.Interface + to.Interface = &intf + } + return to +} + +func convertInterfaceTo040(from *Interface) *types040.Interface { + return &types040.Interface{ + Name: from.Name, + Mac: from.Mac, + Sandbox: from.Sandbox, + } +} + +func convertTo04x(from types.Result, toVersion string) (types.Result, error) { + fromResult := from.(*Result) + toResult := &types040.Result{ + CNIVersion: toVersion, + DNS: *fromResult.DNS.Copy(), + Routes: []*types.Route{}, + } + for _, fromIntf := range fromResult.Interfaces { + toResult.Interfaces = append(toResult.Interfaces, convertInterfaceTo040(fromIntf)) + } + for _, fromIPC := range fromResult.IPs { + toResult.IPs = append(toResult.IPs, convertIPConfigTo040(fromIPC)) + } + for _, fromRoute := range fromResult.Routes { + toResult.Routes = append(toResult.Routes, fromRoute.Copy()) + } + return toResult, nil +} + +func convertTo02x(from types.Result, toVersion string) (types.Result, error) { + // First convert to 0.4.0 + result040, err := convertTo04x(from, "0.4.0") + if err != nil { + return nil, err + } + result02x, err := convert.Convert(result040, toVersion) + if err != nil { + return nil, err + } + return result02x, nil +} + +func (r *Result) Version() string { + return r.CNIVersion +} + +func (r *Result) GetAsVersion(version string) (types.Result, error) { + // If the creator of the 
result did not set the CNIVersion, assume it + // should be the highest spec version implemented by this Result + if r.CNIVersion == "" { + r.CNIVersion = ImplementedSpecVersion + } + return convert.Convert(r, version) +} + +func (r *Result) Print() error { + return r.PrintTo(os.Stdout) +} + +func (r *Result) PrintTo(writer io.Writer) error { + data, err := json.MarshalIndent(r, "", " ") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Interface contains values about the created interfaces +type Interface struct { + Name string `json:"name"` + Mac string `json:"mac,omitempty"` + Sandbox string `json:"sandbox,omitempty"` +} + +func (i *Interface) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *Interface) Copy() *Interface { + if i == nil { + return nil + } + newIntf := *i + return &newIntf +} + +// Int returns a pointer to the int value passed in. Used to +// set the IPConfig.Interface field. +func Int(v int) *int { + return &v +} + +// IPConfig contains values necessary to configure an IP address on an interface +type IPConfig struct { + // Index into Result structs Interfaces list + Interface *int + Address net.IPNet + Gateway net.IP +} + +func (i *IPConfig) String() string { + return fmt.Sprintf("%+v", *i) +} + +func (i *IPConfig) Copy() *IPConfig { + if i == nil { + return nil + } + + ipc := &IPConfig{ + Address: i.Address, + Gateway: i.Gateway, + } + if i.Interface != nil { + intf := *i.Interface + ipc.Interface = &intf + } + return ipc +} + +// JSON (un)marshallable types +type ipConfig struct { + Interface *int `json:"interface,omitempty"` + Address types.IPNet `json:"address"` + Gateway net.IP `json:"gateway,omitempty"` +} + +func (c *IPConfig) MarshalJSON() ([]byte, error) { + ipc := ipConfig{ + Interface: c.Interface, + Address: types.IPNet(c.Address), + Gateway: c.Gateway, + } + + return json.Marshal(ipc) +} + +func (c *IPConfig) UnmarshalJSON(data []byte) error { + ipc := ipConfig{} + if err := 
json.Unmarshal(data, &ipc); err != nil { + return err + } + + c.Interface = ipc.Interface + c.Address = net.IPNet(ipc.Address) + c.Gateway = ipc.Gateway + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/args.go b/vendor/github.com/containernetworking/cni/pkg/types/args.go new file mode 100644 index 0000000000000..7516f03ef581c --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/args.go @@ -0,0 +1,122 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding" + "fmt" + "reflect" + "strings" +) + +// UnmarshallableBool typedef for builtin bool +// because builtin type's methods can't be declared +type UnmarshallableBool bool + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Returns boolean true if the string is "1" or "[Tt]rue" +// Returns boolean false if the string is "0" or "[Ff]alse" +func (b *UnmarshallableBool) UnmarshalText(data []byte) error { + s := strings.ToLower(string(data)) + switch s { + case "1", "true": + *b = true + case "0", "false": + *b = false + default: + return fmt.Errorf("boolean unmarshal error: invalid input %s", s) + } + return nil +} + +// UnmarshallableString typedef for builtin string +type UnmarshallableString string + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
+// Returns the string +func (s *UnmarshallableString) UnmarshalText(data []byte) error { + *s = UnmarshallableString(data) + return nil +} + +// CommonArgs contains the IgnoreUnknown argument +// and must be embedded by all Arg structs +type CommonArgs struct { + IgnoreUnknown UnmarshallableBool `json:"ignoreunknown,omitempty"` +} + +// GetKeyField is a helper function to receive Values +// Values that represent a pointer to a struct +func GetKeyField(keyString string, v reflect.Value) reflect.Value { + return v.Elem().FieldByName(keyString) +} + +// UnmarshalableArgsError is used to indicate error unmarshalling args +// from the args-string in the form "K=V;K2=V2;..." +type UnmarshalableArgsError struct { + error +} + +// LoadArgs parses args from a string in the form "K=V;K2=V2;..." +func LoadArgs(args string, container interface{}) error { + if args == "" { + return nil + } + + containerValue := reflect.ValueOf(container) + + pairs := strings.Split(args, ";") + unknownArgs := []string{} + for _, pair := range pairs { + kv := strings.Split(pair, "=") + if len(kv) != 2 { + return fmt.Errorf("ARGS: invalid pair %q", pair) + } + keyString := kv[0] + valueString := kv[1] + keyField := GetKeyField(keyString, containerValue) + if !keyField.IsValid() { + unknownArgs = append(unknownArgs, pair) + continue + } + + var keyFieldInterface interface{} + switch { + case keyField.Kind() == reflect.Ptr: + keyField.Set(reflect.New(keyField.Type().Elem())) + keyFieldInterface = keyField.Interface() + case keyField.CanAddr() && keyField.Addr().CanInterface(): + keyFieldInterface = keyField.Addr().Interface() + default: + return UnmarshalableArgsError{fmt.Errorf("field '%s' has no valid interface", keyString)} + } + u, ok := keyFieldInterface.(encoding.TextUnmarshaler) + if !ok { + return UnmarshalableArgsError{fmt.Errorf( + "ARGS: cannot unmarshal into field '%s' - type '%s' does not implement encoding.TextUnmarshaler", + keyString, reflect.TypeOf(keyFieldInterface))} + } + err := 
u.UnmarshalText([]byte(valueString)) + if err != nil { + return fmt.Errorf("ARGS: error parsing value of pair %q: %w", pair, err) + } + } + + isIgnoreUnknown := GetKeyField("IgnoreUnknown", containerValue).Bool() + if len(unknownArgs) > 0 && !isIgnoreUnknown { + return fmt.Errorf("ARGS: unknown args %q", unknownArgs) + } + return nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/create/create.go b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go new file mode 100644 index 0000000000000..ed28b33e8e1c1 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/create/create.go @@ -0,0 +1,56 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package create + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + convert "github.com/containernetworking/cni/pkg/types/internal" +) + +// DecodeVersion returns the CNI version from CNI configuration or result JSON, +// or an error if the operation could not be performed. 
+func DecodeVersion(jsonBytes []byte) (string, error) { + var conf struct { + CNIVersion string `json:"cniVersion"` + } + err := json.Unmarshal(jsonBytes, &conf) + if err != nil { + return "", fmt.Errorf("decoding version from network config: %w", err) + } + if conf.CNIVersion == "" { + return "0.1.0", nil + } + return conf.CNIVersion, nil +} + +// Create creates a CNI Result using the given JSON with the expected +// version, or an error if the creation could not be performed +func Create(version string, bytes []byte) (types.Result, error) { + return convert.Create(version, bytes) +} + +// CreateFromBytes creates a CNI Result from the given JSON, automatically +// detecting the CNI spec version of the result. An error is returned if the +// operation could not be performed. +func CreateFromBytes(bytes []byte) (types.Result, error) { + version, err := DecodeVersion(bytes) + if err != nil { + return nil, err + } + return convert.Create(version, bytes) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go new file mode 100644 index 0000000000000..bdbe4b0a59a9a --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/convert.go @@ -0,0 +1,92 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package convert + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types" +) + +// ConvertFn should convert from the given arbitrary Result type into a +// Result implementing CNI specification version passed in toVersion. +// The function is guaranteed to be passed a Result type matching the +// fromVersion it was registered with, and is guaranteed to be +// passed a toVersion matching one of the toVersions it was registered with. +type ConvertFn func(from types.Result, toVersion string) (types.Result, error) + +type converter struct { + // fromVersion is the CNI Result spec version that convertFn accepts + fromVersion string + // toVersions is a list of versions that convertFn can convert to + toVersions []string + convertFn ConvertFn +} + +var converters []*converter + +func findConverter(fromVersion, toVersion string) *converter { + for _, c := range converters { + if c.fromVersion == fromVersion { + for _, v := range c.toVersions { + if v == toVersion { + return c + } + } + } + } + return nil +} + +// Convert converts a CNI Result to the requested CNI specification version, +// or returns an error if the conversion could not be performed or failed +func Convert(from types.Result, toVersion string) (types.Result, error) { + if toVersion == "" { + toVersion = "0.1.0" + } + + fromVersion := from.Version() + + // Shortcut for same version + if fromVersion == toVersion { + return from, nil + } + + // Otherwise find the right converter + c := findConverter(fromVersion, toVersion) + if c == nil { + return nil, fmt.Errorf("no converter for CNI result version %s to %s", + fromVersion, toVersion) + } + return c.convertFn(from, toVersion) +} + +// RegisterConverter registers a CNI Result converter. SHOULD NOT BE CALLED +// EXCEPT FROM CNI ITSELF. 
+func RegisterConverter(fromVersion string, toVersions []string, convertFn ConvertFn) { + // Make sure there is no converter already registered for these + // from and to versions + for _, v := range toVersions { + if findConverter(fromVersion, v) != nil { + panic(fmt.Sprintf("converter already registered for %s to %s", + fromVersion, v)) + } + } + converters = append(converters, &converter{ + fromVersion: fromVersion, + toVersions: toVersions, + convertFn: convertFn, + }) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go new file mode 100644 index 0000000000000..9636309125946 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/internal/create.go @@ -0,0 +1,66 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package convert + +import ( + "fmt" + + "github.com/containernetworking/cni/pkg/types" +) + +type ResultFactoryFunc func([]byte) (types.Result, error) + +type creator struct { + // CNI Result spec versions that createFn can create a Result for + versions []string + createFn ResultFactoryFunc +} + +var creators []*creator + +func findCreator(version string) *creator { + for _, c := range creators { + for _, v := range c.versions { + if v == version { + return c + } + } + } + return nil +} + +// Create creates a CNI Result using the given JSON, or an error if the creation +// could not be performed +func Create(version string, bytes []byte) (types.Result, error) { + if c := findCreator(version); c != nil { + return c.createFn(bytes) + } + return nil, fmt.Errorf("unsupported CNI result version %q", version) +} + +// RegisterCreator registers a CNI Result creator. SHOULD NOT BE CALLED +// EXCEPT FROM CNI ITSELF. +func RegisterCreator(versions []string, createFn ResultFactoryFunc) { + // Make sure there is no creator already registered for these versions + for _, v := range versions { + if findCreator(v) != nil { + panic(fmt.Sprintf("creator already registered for %s", v)) + } + } + creators = append(creators, &creator{ + versions: versions, + createFn: createFn, + }) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go new file mode 100644 index 0000000000000..fba17dfc0f394 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -0,0 +1,234 @@ +// Copyright 2015 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding/json" + "fmt" + "io" + "net" + "os" +) + +// like net.IPNet but adds JSON marshalling and unmarshalling +type IPNet net.IPNet + +// ParseCIDR takes a string like "10.2.3.1/24" and +// return IPNet with "10.2.3.1" and /24 mask +func ParseCIDR(s string) (*net.IPNet, error) { + ip, ipn, err := net.ParseCIDR(s) + if err != nil { + return nil, err + } + + ipn.IP = ip + return ipn, nil +} + +func (n IPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(&n).String()) +} + +func (n *IPNet) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + tmp, err := ParseCIDR(s) + if err != nil { + return err + } + + *n = IPNet(*tmp) + return nil +} + +// NetConf describes a network. +type NetConf struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + Type string `json:"type,omitempty"` + Capabilities map[string]bool `json:"capabilities,omitempty"` + IPAM IPAM `json:"ipam,omitempty"` + DNS DNS `json:"dns"` + + RawPrevResult map[string]interface{} `json:"prevResult,omitempty"` + PrevResult Result `json:"-"` +} + +type IPAM struct { + Type string `json:"type,omitempty"` +} + +// NetConfList describes an ordered list of networks. 
+type NetConfList struct { + CNIVersion string `json:"cniVersion,omitempty"` + + Name string `json:"name,omitempty"` + DisableCheck bool `json:"disableCheck,omitempty"` + Plugins []*NetConf `json:"plugins,omitempty"` +} + +// Result is an interface that provides the result of plugin execution +type Result interface { + // The highest CNI specification result version the result supports + // without having to convert + Version() string + + // Returns the result converted into the requested CNI specification + // result version, or an error if conversion failed + GetAsVersion(version string) (Result, error) + + // Prints the result in JSON format to stdout + Print() error + + // Prints the result in JSON format to provided writer + PrintTo(writer io.Writer) error +} + +func PrintResult(result Result, version string) error { + newResult, err := result.GetAsVersion(version) + if err != nil { + return err + } + return newResult.Print() +} + +// DNS contains values interesting for DNS resolvers +type DNS struct { + Nameservers []string `json:"nameservers,omitempty"` + Domain string `json:"domain,omitempty"` + Search []string `json:"search,omitempty"` + Options []string `json:"options,omitempty"` +} + +func (d *DNS) Copy() *DNS { + if d == nil { + return nil + } + + to := &DNS{Domain: d.Domain} + for _, ns := range d.Nameservers { + to.Nameservers = append(to.Nameservers, ns) + } + for _, s := range d.Search { + to.Search = append(to.Search, s) + } + for _, o := range d.Options { + to.Options = append(to.Options, o) + } + return to +} + +type Route struct { + Dst net.IPNet + GW net.IP +} + +func (r *Route) String() string { + return fmt.Sprintf("%+v", *r) +} + +func (r *Route) Copy() *Route { + if r == nil { + return nil + } + + return &Route{ + Dst: r.Dst, + GW: r.GW, + } +} + +// Well known error codes +// see https://github.com/containernetworking/cni/blob/master/SPEC.md#well-known-error-codes +const ( + ErrUnknown uint = iota // 0 + ErrIncompatibleCNIVersion // 1 + 
ErrUnsupportedField // 2 + ErrUnknownContainer // 3 + ErrInvalidEnvironmentVariables // 4 + ErrIOFailure // 5 + ErrDecodingFailure // 6 + ErrInvalidNetworkConfig // 7 + ErrTryAgainLater uint = 11 + ErrInternal uint = 999 +) + +type Error struct { + Code uint `json:"code"` + Msg string `json:"msg"` + Details string `json:"details,omitempty"` +} + +func NewError(code uint, msg, details string) *Error { + return &Error{ + Code: code, + Msg: msg, + Details: details, + } +} + +func (e *Error) Error() string { + details := "" + if e.Details != "" { + details = fmt.Sprintf("; %v", e.Details) + } + return fmt.Sprintf("%v%v", e.Msg, details) +} + +func (e *Error) Print() error { + return prettyPrint(e) +} + +// net.IPNet is not JSON (un)marshallable so this duality is needed +// for our custom IPNet type + +// JSON (un)marshallable types +type route struct { + Dst IPNet `json:"dst"` + GW net.IP `json:"gw,omitempty"` +} + +func (r *Route) UnmarshalJSON(data []byte) error { + rt := route{} + if err := json.Unmarshal(data, &rt); err != nil { + return err + } + + r.Dst = net.IPNet(rt.Dst) + r.GW = rt.GW + return nil +} + +func (r Route) MarshalJSON() ([]byte, error) { + rt := route{ + Dst: IPNet(r.Dst), + GW: r.GW, + } + + return json.Marshal(rt) +} + +func prettyPrint(obj interface{}) error { + data, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = os.Stdout.Write(data) + return err +} diff --git a/vendor/github.com/containernetworking/cni/pkg/utils/utils.go b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go new file mode 100644 index 0000000000000..b8ec38874595f --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/utils/utils.go @@ -0,0 +1,84 @@ +// Copyright 2019 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package utils + +import ( + "bytes" + "fmt" + "regexp" + "unicode" + + "github.com/containernetworking/cni/pkg/types" +) + +const ( + // cniValidNameChars is the regexp used to validate valid characters in + // containerID and networkName + cniValidNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.\-]` + + // maxInterfaceNameLength is the length max of a valid interface name + maxInterfaceNameLength = 15 +) + +var cniReg = regexp.MustCompile(`^` + cniValidNameChars + `*$`) + +// ValidateContainerID will validate that the supplied containerID is not empty does not contain invalid characters +func ValidateContainerID(containerID string) *types.Error { + + if containerID == "" { + return types.NewError(types.ErrUnknownContainer, "missing containerID", "") + } + if !cniReg.MatchString(containerID) { + return types.NewError(types.ErrInvalidEnvironmentVariables, "invalid characters in containerID", containerID) + } + return nil +} + +// ValidateNetworkName will validate that the supplied networkName does not contain invalid characters +func ValidateNetworkName(networkName string) *types.Error { + + if networkName == "" { + return types.NewError(types.ErrInvalidNetworkConfig, "missing network name:", "") + } + if !cniReg.MatchString(networkName) { + return types.NewError(types.ErrInvalidNetworkConfig, "invalid characters found in network name", networkName) + } + return nil +} + +// ValidateInterfaceName will validate the interface name based on the three rules below +// 1. The name must not be empty +// 2. The name must be less than 16 characters +// 3. 
The name must not be "." or ".."
+// 4. The name must not contain / or : or any whitespace characters
+// ref to https://github.com/torvalds/linux/blob/master/net/core/dev.c#L1024
+func ValidateInterfaceName(ifName string) *types.Error {
+	if len(ifName) == 0 {
+		return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is empty", "")
+	}
+	if len(ifName) > maxInterfaceNameLength {
+		return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is too long", fmt.Sprintf("interface name should be less than %d characters", maxInterfaceNameLength+1))
+	}
+	if ifName == "." || ifName == ".." {
+		return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name is . or ..", "")
+	}
+	for _, r := range bytes.Runes([]byte(ifName)) {
+		if r == '/' || r == ':' || unicode.IsSpace(r) {
+			return types.NewError(types.ErrInvalidEnvironmentVariables, "interface name contains / or : or whitespace characters", "")
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containernetworking/cni/pkg/version/conf.go b/vendor/github.com/containernetworking/cni/pkg/version/conf.go
new file mode 100644
index 0000000000000..808c33b8382ad
--- /dev/null
+++ b/vendor/github.com/containernetworking/cni/pkg/version/conf.go
@@ -0,0 +1,26 @@
+// Copyright 2016 CNI authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package version + +import ( + "github.com/containernetworking/cni/pkg/types/create" +) + +// ConfigDecoder can decode the CNI version available in network config data +type ConfigDecoder struct{} + +func (*ConfigDecoder) Decode(jsonBytes []byte) (string, error) { + return create.DecodeVersion(jsonBytes) +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/plugin.go b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go new file mode 100644 index 0000000000000..17b22b6b0c4e1 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/plugin.go @@ -0,0 +1,144 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + "io" + "strconv" + "strings" +) + +// PluginInfo reports information about CNI versioning +type PluginInfo interface { + // SupportedVersions returns one or more CNI spec versions that the plugin + // supports. 
If input is provided in one of these versions, then the plugin + // promises to use the same CNI version in its response + SupportedVersions() []string + + // Encode writes this CNI version information as JSON to the given Writer + Encode(io.Writer) error +} + +type pluginInfo struct { + CNIVersion_ string `json:"cniVersion"` + SupportedVersions_ []string `json:"supportedVersions,omitempty"` +} + +// pluginInfo implements the PluginInfo interface +var _ PluginInfo = &pluginInfo{} + +func (p *pluginInfo) Encode(w io.Writer) error { + return json.NewEncoder(w).Encode(p) +} + +func (p *pluginInfo) SupportedVersions() []string { + return p.SupportedVersions_ +} + +// PluginSupports returns a new PluginInfo that will report the given versions +// as supported +func PluginSupports(supportedVersions ...string) PluginInfo { + if len(supportedVersions) < 1 { + panic("programmer error: you must support at least one version") + } + return &pluginInfo{ + CNIVersion_: Current(), + SupportedVersions_: supportedVersions, + } +} + +// PluginDecoder can decode the response returned by a plugin's VERSION command +type PluginDecoder struct{} + +func (*PluginDecoder) Decode(jsonBytes []byte) (PluginInfo, error) { + var info pluginInfo + err := json.Unmarshal(jsonBytes, &info) + if err != nil { + return nil, fmt.Errorf("decoding version info: %w", err) + } + if info.CNIVersion_ == "" { + return nil, fmt.Errorf("decoding version info: missing field cniVersion") + } + if len(info.SupportedVersions_) == 0 { + if info.CNIVersion_ == "0.2.0" { + return PluginSupports("0.1.0", "0.2.0"), nil + } + return nil, fmt.Errorf("decoding version info: missing field supportedVersions") + } + return &info, nil +} + +// ParseVersion parses a version string like "3.0.1" or "0.4.5" into major, +// minor, and micro numbers or returns an error +func ParseVersion(version string) (int, int, int, error) { + var major, minor, micro int + if version == "" { // special case: no version declared == v0.1.0 + return 
0, 1, 0, nil + } + + parts := strings.Split(version, ".") + if len(parts) >= 4 { + return -1, -1, -1, fmt.Errorf("invalid version %q: too many parts", version) + } + + major, err := strconv.Atoi(parts[0]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert major version part %q: %w", parts[0], err) + } + + if len(parts) >= 2 { + minor, err = strconv.Atoi(parts[1]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert minor version part %q: %w", parts[1], err) + } + } + + if len(parts) >= 3 { + micro, err = strconv.Atoi(parts[2]) + if err != nil { + return -1, -1, -1, fmt.Errorf("failed to convert micro version part %q: %w", parts[2], err) + } + } + + return major, minor, micro, nil +} + +// GreaterThanOrEqualTo takes two string versions, parses them into major/minor/micro +// numbers, and compares them to determine whether the first version is greater +// than or equal to the second +func GreaterThanOrEqualTo(version, otherVersion string) (bool, error) { + firstMajor, firstMinor, firstMicro, err := ParseVersion(version) + if err != nil { + return false, err + } + + secondMajor, secondMinor, secondMicro, err := ParseVersion(otherVersion) + if err != nil { + return false, err + } + + if firstMajor > secondMajor { + return true, nil + } else if firstMajor == secondMajor { + if firstMinor > secondMinor { + return true, nil + } else if firstMinor == secondMinor && firstMicro >= secondMicro { + return true, nil + } + } + return false, nil +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go new file mode 100644 index 0000000000000..25c3810b2aaf2 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/reconcile.go @@ -0,0 +1,49 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import "fmt" + +type ErrorIncompatible struct { + Config string + Supported []string +} + +func (e *ErrorIncompatible) Details() string { + return fmt.Sprintf("config is %q, plugin supports %q", e.Config, e.Supported) +} + +func (e *ErrorIncompatible) Error() string { + return fmt.Sprintf("incompatible CNI versions: %s", e.Details()) +} + +type Reconciler struct{} + +func (r *Reconciler) Check(configVersion string, pluginInfo PluginInfo) *ErrorIncompatible { + return r.CheckRaw(configVersion, pluginInfo.SupportedVersions()) +} + +func (*Reconciler) CheckRaw(configVersion string, supportedVersions []string) *ErrorIncompatible { + for _, supportedVersion := range supportedVersions { + if configVersion == supportedVersion { + return nil + } + } + + return &ErrorIncompatible{ + Config: configVersion, + Supported: supportedVersions, + } +} diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go new file mode 100644 index 0000000000000..1326f8038e571 --- /dev/null +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -0,0 +1,89 @@ +// Copyright 2016 CNI authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "encoding/json" + "fmt" + + "github.com/containernetworking/cni/pkg/types" + types100 "github.com/containernetworking/cni/pkg/types/100" + "github.com/containernetworking/cni/pkg/types/create" +) + +// Current reports the version of the CNI spec implemented by this library +func Current() string { + return types100.ImplementedSpecVersion +} + +// Legacy PluginInfo describes a plugin that is backwards compatible with the +// CNI spec version 0.1.0. In particular, a runtime compiled against the 0.1.0 +// library ought to work correctly with a plugin that reports support for +// Legacy versions. +// +// Any future CNI spec versions which meet this definition should be added to +// this list. +var Legacy = PluginSupports("0.1.0", "0.2.0") +var All = PluginSupports("0.1.0", "0.2.0", "0.3.0", "0.3.1", "0.4.0", "1.0.0") + +// VersionsFrom returns a list of versions starting from min, inclusive +func VersionsStartingFrom(min string) PluginInfo { + out := []string{} + // cheat, just assume ordered + ok := false + for _, v := range All.SupportedVersions() { + if !ok && v == min { + ok = true + } + if ok { + out = append(out, v) + } + } + return PluginSupports(out...) +} + +// Finds a Result object matching the requested version (if any) and asks +// that object to parse the plugin result, returning an error if parsing failed. 
+func NewResult(version string, resultBytes []byte) (types.Result, error) { + return create.Create(version, resultBytes) +} + +// ParsePrevResult parses a prevResult in a NetConf structure and sets +// the NetConf's PrevResult member to the parsed Result object. +func ParsePrevResult(conf *types.NetConf) error { + if conf.RawPrevResult == nil { + return nil + } + + // Prior to 1.0.0, Result types may not marshal a CNIVersion. Since the + // result version must match the config version, if the Result's version + // is empty, inject the config version. + if ver, ok := conf.RawPrevResult["CNIVersion"]; !ok || ver == "" { + conf.RawPrevResult["CNIVersion"] = conf.CNIVersion + } + + resultBytes, err := json.Marshal(conf.RawPrevResult) + if err != nil { + return fmt.Errorf("could not serialize prevResult: %w", err) + } + + conf.RawPrevResult = nil + conf.PrevResult, err = create.Create(conf.CNIVersion, resultBytes) + if err != nil { + return fmt.Errorf("could not parse prevResult: %w", err) + } + + return nil +} diff --git a/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go new file mode 100644 index 0000000000000..43a05cccefe9c --- /dev/null +++ b/vendor/github.com/moby/buildkit/executor/containerdexecutor/executor.go @@ -0,0 +1,441 @@ +package containerdexecutor + +import ( + "context" + "io" + "io/ioutil" + "os" + "path/filepath" + "sync" + "syscall" + "time" + + "github.com/moby/buildkit/util/bklog" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + + "github.com/containerd/containerd" + "github.com/containerd/containerd/cio" + "github.com/containerd/containerd/mount" + containerdoci "github.com/containerd/containerd/oci" + "github.com/containerd/continuity/fs" + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/executor/oci" + gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" + 
"github.com/moby/buildkit/identity" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/network" + rootlessspecconv "github.com/moby/buildkit/util/rootless/specconv" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type containerdExecutor struct { + client *containerd.Client + root string + networkProviders map[pb.NetMode]network.Provider + cgroupParent string + dnsConfig *oci.DNSConfig + running map[string]chan error + mu sync.Mutex + apparmorProfile string + traceSocket string + rootless bool +} + +// New creates a new executor backed by connection to containerd API +func New(client *containerd.Client, root, cgroup string, networkProviders map[pb.NetMode]network.Provider, dnsConfig *oci.DNSConfig, apparmorProfile string, traceSocket string, rootless bool) executor.Executor { + // clean up old hosts/resolv.conf file. ignore errors + os.RemoveAll(filepath.Join(root, "hosts")) + os.RemoveAll(filepath.Join(root, "resolv.conf")) + + return &containerdExecutor{ + client: client, + root: root, + networkProviders: networkProviders, + cgroupParent: cgroup, + dnsConfig: dnsConfig, + running: make(map[string]chan error), + apparmorProfile: apparmorProfile, + traceSocket: traceSocket, + rootless: rootless, + } +} + +func (w *containerdExecutor) Run(ctx context.Context, id string, root executor.Mount, mounts []executor.Mount, process executor.ProcessInfo, started chan<- struct{}) (err error) { + if id == "" { + id = identity.NewID() + } + + startedOnce := sync.Once{} + done := make(chan error, 1) + w.mu.Lock() + w.running[id] = done + w.mu.Unlock() + defer func() { + w.mu.Lock() + delete(w.running, id) + w.mu.Unlock() + done <- err + close(done) + if started != nil { + startedOnce.Do(func() { + close(started) + }) + } + }() + + meta := process.Meta + + resolvConf, err := oci.GetResolvConf(ctx, w.root, nil, w.dnsConfig) + if err != nil { + return err + } + + hostsFile, clean, err := 
oci.GetHostsFile(ctx, w.root, meta.ExtraHosts, nil, meta.Hostname) + if err != nil { + return err + } + if clean != nil { + defer clean() + } + + mountable, err := root.Src.Mount(ctx, false) + if err != nil { + return err + } + + rootMounts, release, err := mountable.Mount() + if err != nil { + return err + } + if release != nil { + defer release() + } + + lm := snapshot.LocalMounterWithMounts(rootMounts) + rootfsPath, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() + defer executor.MountStubsCleaner(rootfsPath, mounts)() + + uid, gid, sgids, err := oci.GetUser(rootfsPath, meta.User) + if err != nil { + return err + } + + identity := idtools.Identity{ + UID: int(uid), + GID: int(gid), + } + + newp, err := fs.RootPath(rootfsPath, meta.Cwd) + if err != nil { + return errors.Wrapf(err, "working dir %s points to invalid target", newp) + } + if _, err := os.Stat(newp); err != nil { + if err := idtools.MkdirAllAndChown(newp, 0755, identity); err != nil { + return errors.Wrapf(err, "failed to create working directory %s", newp) + } + } + + provider, ok := w.networkProviders[meta.NetMode] + if !ok { + return errors.Errorf("unknown network mode %s", meta.NetMode) + } + namespace, err := provider.New() + if err != nil { + return err + } + defer namespace.Close() + + if meta.NetMode == pb.NetMode_HOST { + bklog.G(ctx).Info("enabling HostNetworking") + } + + opts := []containerdoci.SpecOpts{oci.WithUIDGID(uid, gid, sgids)} + if meta.ReadonlyRootFS { + opts = append(opts, containerdoci.WithRootFSReadonly()) + } + + processMode := oci.ProcessSandbox // FIXME(AkihiroSuda) + spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, processMode, nil, w.apparmorProfile, w.traceSocket, opts...) 
+ if err != nil { + return err + } + defer cleanup() + spec.Process.Terminal = meta.Tty + if w.rootless { + if err := rootlessspecconv.ToRootless(spec); err != nil { + return err + } + } + + container, err := w.client.NewContainer(ctx, id, + containerd.WithSpec(spec), + ) + if err != nil { + return err + } + + defer func() { + if err1 := container.Delete(context.TODO()); err == nil && err1 != nil { + err = errors.Wrapf(err1, "failed to delete container %s", id) + } + }() + + fixProcessOutput(&process) + cioOpts := []cio.Opt{cio.WithStreams(process.Stdin, process.Stdout, process.Stderr)} + if meta.Tty { + cioOpts = append(cioOpts, cio.WithTerminal) + } + + task, err := container.NewTask(ctx, cio.NewCreator(cioOpts...), containerd.WithRootFS([]mount.Mount{{ + Source: rootfsPath, + Type: "bind", + Options: []string{"rbind"}, + }})) + if err != nil { + return err + } + + defer func() { + if _, err1 := task.Delete(context.TODO()); err == nil && err1 != nil { + err = errors.Wrapf(err1, "failed to delete task %s", id) + } + }() + + trace.SpanFromContext(ctx).AddEvent("Container created") + err = w.runProcess(ctx, task, process.Resize, process.Signal, func() { + startedOnce.Do(func() { + trace.SpanFromContext(ctx).AddEvent("Container started") + if started != nil { + close(started) + } + }) + }) + return err +} + +func (w *containerdExecutor) Exec(ctx context.Context, id string, process executor.ProcessInfo) (err error) { + meta := process.Meta + + // first verify the container is running, if we get an error assume the container + // is in the process of being created and check again every 100ms or until + // context is canceled. 
+ + var container containerd.Container + var task containerd.Task + for { + w.mu.Lock() + done, ok := w.running[id] + w.mu.Unlock() + + if !ok { + return errors.Errorf("container %s not found", id) + } + + if container == nil { + container, _ = w.client.LoadContainer(ctx, id) + } + if container != nil && task == nil { + task, _ = container.Task(ctx, nil) + } + if task != nil { + status, _ := task.Status(ctx) + if status.Status == containerd.Running { + break + } + } + select { + case <-ctx.Done(): + return ctx.Err() + case err, ok := <-done: + if !ok || err == nil { + return errors.Errorf("container %s has stopped", id) + } + return errors.Wrapf(err, "container %s has exited with error", id) + case <-time.After(100 * time.Millisecond): + continue + } + } + + spec, err := container.Spec(ctx) + if err != nil { + return errors.WithStack(err) + } + + proc := spec.Process + + // TODO how do we get rootfsPath for oci.GetUser in case user passed in username rather than uid:gid? + // For now only support uid:gid + if meta.User != "" { + uid, gid, err := oci.ParseUIDGID(meta.User) + if err != nil { + return errors.WithStack(err) + } + proc.User = specs.User{ + UID: uid, + GID: gid, + AdditionalGids: []uint32{}, + } + } + + proc.Terminal = meta.Tty + proc.Args = meta.Args + if meta.Cwd != "" { + spec.Process.Cwd = meta.Cwd + } + if len(process.Meta.Env) > 0 { + spec.Process.Env = process.Meta.Env + } + + fixProcessOutput(&process) + cioOpts := []cio.Opt{cio.WithStreams(process.Stdin, process.Stdout, process.Stderr)} + if meta.Tty { + cioOpts = append(cioOpts, cio.WithTerminal) + } + + taskProcess, err := task.Exec(ctx, identity.NewID(), proc, cio.NewCreator(cioOpts...)) + if err != nil { + return errors.WithStack(err) + } + + err = w.runProcess(ctx, taskProcess, process.Resize, process.Signal, nil) + return err +} + +func fixProcessOutput(process *executor.ProcessInfo) { + // It seems like if containerd has one of stdin, stdout or stderr then the + // others need to be 
present as well otherwise we get this error: + // failed to start io pipe copy: unable to copy pipes: containerd-shim: opening file "" failed: open : no such file or directory: unknown + // So just stub out any missing output + if process.Stdout == nil { + process.Stdout = &nopCloser{ioutil.Discard} + } + if process.Stderr == nil { + process.Stderr = &nopCloser{ioutil.Discard} + } +} + +func (w *containerdExecutor) runProcess(ctx context.Context, p containerd.Process, resize <-chan executor.WinSize, signal <-chan syscall.Signal, started func()) error { + // Not using `ctx` here because the context passed only affects the statusCh which we + // don't want cancelled when ctx.Done is sent. We want to process statusCh on cancel. + statusCh, err := p.Wait(context.Background()) + if err != nil { + return err + } + + io := p.IO() + defer func() { + io.Wait() + io.Close() + }() + + err = p.Start(ctx) + if err != nil { + return err + } + + if started != nil { + started() + } + + p.CloseIO(ctx, containerd.WithStdinCloser) + + // handle signals (and resize) in separate go loop so it does not + // potentially block the container cancel/exit status loop below. 
+ eventCtx, eventCancel := context.WithCancel(ctx) + defer eventCancel() + go func() { + for { + select { + case <-eventCtx.Done(): + return + case size, ok := <-resize: + if !ok { + return // chan closed + } + err = p.Resize(eventCtx, size.Cols, size.Rows) + if err != nil { + bklog.G(eventCtx).Warnf("Failed to resize %s: %s", p.ID(), err) + } + } + } + }() + go func() { + for { + select { + case <-eventCtx.Done(): + return + case sig, ok := <-signal: + if !ok { + return // chan closed + } + err = p.Kill(eventCtx, sig) + if err != nil { + bklog.G(eventCtx).Warnf("Failed to signal %s: %s", p.ID(), err) + } + } + } + }() + + var cancel func() + var killCtxDone <-chan struct{} + ctxDone := ctx.Done() + for { + select { + case <-ctxDone: + ctxDone = nil + var killCtx context.Context + killCtx, cancel = context.WithTimeout(context.Background(), 10*time.Second) + killCtxDone = killCtx.Done() + p.Kill(killCtx, syscall.SIGKILL) + io.Cancel() + case status := <-statusCh: + if cancel != nil { + cancel() + } + trace.SpanFromContext(ctx).AddEvent( + "Container exited", + trace.WithAttributes( + attribute.Int("exit.code", int(status.ExitCode())), + ), + ) + if status.ExitCode() != 0 { + exitErr := &gatewayapi.ExitError{ + ExitCode: status.ExitCode(), + Err: status.Error(), + } + if status.ExitCode() == gatewayapi.UnknownExitStatus && status.Error() != nil { + exitErr.Err = errors.Wrap(status.Error(), "failure waiting for process") + } + select { + case <-ctx.Done(): + exitErr.Err = errors.Wrap(ctx.Err(), exitErr.Error()) + default: + } + return exitErr + } + return nil + case <-killCtxDone: + if cancel != nil { + cancel() + } + io.Cancel() + return errors.Errorf("failed to kill process on cancel") + } + } +} + +type nopCloser struct { + io.Writer +} + +func (c *nopCloser) Close() error { + return nil +} diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/export.go b/vendor/github.com/moby/buildkit/exporter/containerimage/export.go new file mode 100644 index 
0000000000000..42f6e9dfdf9a0 --- /dev/null +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/export.go @@ -0,0 +1,490 @@ +package containerimage + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/containerd/rootfs" + "github.com/moby/buildkit/cache" + cacheconfig "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/push" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + keyImageName = "name" + keyPush = "push" + keyPushByDigest = "push-by-digest" + keyInsecure = "registry.insecure" + keyUnpack = "unpack" + keyDanglingPrefix = "dangling-name-prefix" + keyNameCanonical = "name-canonical" + keyLayerCompression = "compression" + keyForceCompression = "force-compression" + keyCompressionLevel = "compression-level" + keyBuildInfo = "buildinfo" + keyBuildInfoAttrs = "buildinfo-attrs" + ociTypes = "oci-mediatypes" + // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was + // already found to use a non-distributable media type. + // When this option is not set, the exporter will change the media type of the layer to a distributable one. 
+ preferNondistLayersKey = "prefer-nondist-layers" +) + +type Opt struct { + SessionManager *session.Manager + ImageWriter *ImageWriter + Images images.Store + RegistryHosts docker.RegistryHosts + LeaseManager leases.Manager +} + +type imageExporter struct { + opt Opt +} + +// New returns a new containerimage exporter instance that supports exporting +// to an image store and pushing the image to registry. +// This exporter supports following values in returned kv map: +// - containerimage.digest - The digest of the root manifest for the image. +func New(opt Opt) (exporter.Exporter, error) { + im := &imageExporter{opt: opt} + return im, nil +} + +func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { + i := &imageExporterInstance{ + imageExporter: e, + layerCompression: compression.Default, + buildInfo: true, + } + + var esgz bool + for k, v := range opt { + switch k { + case keyImageName: + i.targetName = v + case keyPush: + if v == "" { + i.push = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.push = b + case keyPushByDigest: + if v == "" { + i.pushByDigest = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.pushByDigest = b + case keyInsecure: + if v == "" { + i.insecure = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.insecure = b + case keyUnpack: + if v == "" { + i.unpack = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.unpack = b + case ociTypes: + if v == "" { + i.ociTypes = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified 
for %s", k) + } + i.ociTypes = b + case keyDanglingPrefix: + i.danglingPrefix = v + case keyNameCanonical: + if v == "" { + i.nameCanonical = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.nameCanonical = b + case keyLayerCompression: + switch v { + case "gzip": + i.layerCompression = compression.Gzip + case "estargz": + i.layerCompression = compression.EStargz + esgz = true + case "zstd": + i.layerCompression = compression.Zstd + case "uncompressed": + i.layerCompression = compression.Uncompressed + default: + return nil, errors.Errorf("unsupported layer compression type: %v", v) + } + case keyForceCompression: + if v == "" { + i.forceCompression = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, k) + } + i.forceCompression = b + case keyCompressionLevel: + ii, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, k) + } + v := int(ii) + i.compressionLevel = &v + case keyBuildInfo: + if v == "" { + i.buildInfo = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.buildInfo = b + case keyBuildInfoAttrs: + if v == "" { + i.buildInfoAttrs = false + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.buildInfoAttrs = b + case preferNondistLayersKey: + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, k) + } + i.preferNondistLayers = b + default: + if i.meta == nil { + i.meta = make(map[string][]byte) + } + i.meta[k] = []byte(v) + } + } + if esgz && !i.ociTypes { + logrus.Warn("forcibly turning on oci-mediatype mode for estargz") + i.ociTypes = 
true + } + return i, nil +} + +type imageExporterInstance struct { + *imageExporter + targetName string + push bool + pushByDigest bool + unpack bool + insecure bool + ociTypes bool + nameCanonical bool + danglingPrefix string + layerCompression compression.Type + forceCompression bool + compressionLevel *int + buildInfo bool + buildInfoAttrs bool + meta map[string][]byte + preferNondistLayers bool +} + +func (e *imageExporterInstance) Name() string { + return "exporting to image" +} + +func (e *imageExporterInstance) Config() exporter.Config { + return exporter.Config{ + Compression: e.compression(), + } +} + +func (e *imageExporterInstance) compression() compression.Config { + c := compression.New(e.layerCompression).SetForce(e.forceCompression) + if e.compressionLevel != nil { + c = c.SetLevel(*e.compressionLevel) + } + return c +} + +func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, sessionID string) (map[string]string, error) { + if src.Metadata == nil { + src.Metadata = make(map[string][]byte) + } + for k, v := range e.meta { + src.Metadata[k] = v + } + + ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + defer done(context.TODO()) + + refCfg := e.refCfg() + desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, refCfg, e.buildInfo, e.buildInfoAttrs, sessionID) + if err != nil { + return nil, err + } + + defer func() { + e.opt.ImageWriter.ContentStore().Delete(context.TODO(), desc.Digest) + }() + + resp := make(map[string]string) + + if n, ok := src.Metadata["image.name"]; e.targetName == "*" && ok { + e.targetName = string(n) + } + + nameCanonical := e.nameCanonical + if e.targetName == "" && e.danglingPrefix != "" { + e.targetName = e.danglingPrefix + "@" + desc.Digest.String() + nameCanonical = false + } + + if e.targetName != "" { + targetNames := strings.Split(e.targetName, ",") + for _, targetName := range targetNames { + if e.opt.Images != 
nil { + tagDone := oneOffProgress(ctx, "naming to "+targetName) + img := images.Image{ + Target: *desc, + CreatedAt: time.Now(), + } + sfx := []string{""} + if nameCanonical { + sfx = append(sfx, "@"+desc.Digest.String()) + } + for _, sfx := range sfx { + img.Name = targetName + sfx + if _, err := e.opt.Images.Update(ctx, img); err != nil { + if !errors.Is(err, errdefs.ErrNotFound) { + return nil, tagDone(err) + } + + if _, err := e.opt.Images.Create(ctx, img); err != nil { + return nil, tagDone(err) + } + } + } + tagDone(nil) + + if e.unpack { + if err := e.unpackImage(ctx, img, src, session.NewGroup(sessionID)); err != nil { + return nil, err + } + } + } + if e.push { + annotations := map[digest.Digest]map[string]string{} + mprovider := contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) + if src.Ref != nil { + remotes, err := src.Ref.GetRemotes(ctx, false, refCfg, false, session.NewGroup(sessionID)) + if err != nil { + return nil, err + } + remote := remotes[0] + for _, desc := range remote.Descriptors { + mprovider.Add(desc.Digest, remote.Provider) + addAnnotations(annotations, desc) + } + } + if len(src.Refs) > 0 { + for _, r := range src.Refs { + remotes, err := r.GetRemotes(ctx, false, refCfg, false, session.NewGroup(sessionID)) + if err != nil { + return nil, err + } + remote := remotes[0] + for _, desc := range remote.Descriptors { + mprovider.Add(desc.Digest, remote.Provider) + addAnnotations(annotations, desc) + } + } + } + + if err := push.Push(ctx, e.opt.SessionManager, sessionID, mprovider, e.opt.ImageWriter.ContentStore(), desc.Digest, targetName, e.insecure, e.opt.RegistryHosts, e.pushByDigest, annotations); err != nil { + return nil, err + } + } + } + resp["image.name"] = e.targetName + } + + resp[exptypes.ExporterImageDigestKey] = desc.Digest.String() + if v, ok := desc.Annotations[exptypes.ExporterConfigDigestKey]; ok { + resp[exptypes.ExporterImageConfigDigestKey] = v + delete(desc.Annotations, exptypes.ExporterConfigDigestKey) + } + 
+ dtdesc, err := json.Marshal(desc) + if err != nil { + return nil, err + } + resp[exptypes.ExporterImageDescriptorKey] = base64.StdEncoding.EncodeToString(dtdesc) + + return resp, nil +} + +func (e *imageExporterInstance) refCfg() cacheconfig.RefConfig { + return cacheconfig.RefConfig{ + Compression: e.compression(), + PreferNonDistributable: e.preferNondistLayers, + } +} + +func (e *imageExporterInstance) unpackImage(ctx context.Context, img images.Image, src exporter.Source, s session.Group) (err0 error) { + unpackDone := oneOffProgress(ctx, "unpacking to "+img.Name) + defer func() { + unpackDone(err0) + }() + + var ( + contentStore = e.opt.ImageWriter.ContentStore() + applier = e.opt.ImageWriter.Applier() + snapshotter = e.opt.ImageWriter.Snapshotter() + ) + + // fetch manifest by default platform + manifest, err := images.Manifest(ctx, contentStore, img.Target, platforms.Default()) + if err != nil { + return err + } + + topLayerRef := src.Ref + if len(src.Refs) > 0 { + if r, ok := src.Refs[defaultPlatform()]; ok { + topLayerRef = r + } else { + return errors.Errorf("no reference for default platform %s", defaultPlatform()) + } + } + + remotes, err := topLayerRef.GetRemotes(ctx, true, e.refCfg(), false, s) + if err != nil { + return err + } + remote := remotes[0] + + // ensure the content for each layer exists locally in case any are lazy + if unlazier, ok := remote.Provider.(cache.Unlazier); ok { + if err := unlazier.Unlazy(ctx); err != nil { + return err + } + } + + layers, err := getLayers(ctx, remote.Descriptors, manifest) + if err != nil { + return err + } + + // get containerd snapshotter + ctrdSnapshotter, release := snapshot.NewContainerdSnapshotter(snapshotter) + defer release() + + var chain []digest.Digest + for _, layer := range layers { + if _, err := rootfs.ApplyLayer(ctx, layer, chain, ctrdSnapshotter, applier); err != nil { + return err + } + chain = append(chain, layer.Diff.Digest) + } + + var ( + keyGCLabel = 
	// --- continuation of unpackImage (started on the previous span) ---
	// Pin the unpacked snapshot chain against containerd GC: label the image
	// config blob with a gc.ref.snapshot.<snapshotter> reference to the chain ID.
		fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", snapshotter.Name())
		valueGCLabel = identity.ChainID(chain).String()
	)

	cinfo := content.Info{
		Digest: manifest.Config.Digest,
		Labels: map[string]string{keyGCLabel: valueGCLabel},
	}
	// Fieldpath restricts the update to just this one label so other
	// labels/fields on the config blob are left untouched.
	_, err = contentStore.Update(ctx, cinfo, fmt.Sprintf("labels.%s", keyGCLabel))
	return err
}

// getLayers pairs each remote (compressed blob) descriptor with its
// uncompressed diff descriptor, producing the rootfs.Layer list that
// rootfs.ApplyLayer consumes. The diff digest is read from the
// "containerd.io/uncompressed" annotation on the remote descriptor; the blob
// descriptor is taken positionally from the manifest, so descs and
// manifest.Layers must be the same length and in the same order.
func getLayers(ctx context.Context, descs []ocispecs.Descriptor, manifest ocispecs.Manifest) ([]rootfs.Layer, error) {
	if len(descs) != len(manifest.Layers) {
		return nil, errors.Errorf("mismatched image rootfs and manifest layers")
	}

	layers := make([]rootfs.Layer, len(descs))
	for i, desc := range descs {
		layers[i].Diff = ocispecs.Descriptor{
			// MediaType is the uncompressed (diff) layer type regardless of
			// how the blob itself is compressed.
			MediaType: ocispecs.MediaTypeImageLayer,
			Digest:    digest.Digest(desc.Annotations["containerd.io/uncompressed"]),
		}
		layers[i].Blob = manifest.Layers[i]
	}
	return layers, nil
}

// addAnnotations merges desc's annotations into m, keyed by the descriptor
// digest. A nil annotation map is a no-op; an absent entry adopts desc's map
// directly (no copy), and an existing entry is merged key-by-key with desc's
// values winning on conflict.
func addAnnotations(m map[digest.Digest]map[string]string, desc ocispecs.Descriptor) {
	if desc.Annotations == nil {
		return
	}
	a, ok := m[desc.Digest]
	if !ok {
		m[desc.Digest] = desc.Annotations
		return
	}
	for k, v := range desc.Annotations {
		a[k] = v
	}
}

// defaultPlatform returns the current default platform as a string.
func defaultPlatform() string {
	// Use normalized platform string to avoid the mismatch with platform options which
	// are normalized using platforms.Normalize()
	return platforms.Format(platforms.Normalize(platforms.DefaultSpec()))
}
diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go b/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go
new file mode 100644
index 0000000000000..e5ec1519803d3
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/exporter/containerimage/writer.go
@@ -0,0 +1,572 @@
package containerimage

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"strings"
	"time"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/diff"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
+ "github.com/moby/buildkit/cache" + cacheconfig "github.com/moby/buildkit/cache/config" + "github.com/moby/buildkit/exporter" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/buildinfo" + binfotypes "github.com/moby/buildkit/util/buildinfo/types" + "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/system" + "github.com/moby/buildkit/util/tracing" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/errgroup" +) + +type WriterOpt struct { + Snapshotter snapshot.Snapshotter + ContentStore content.Store + Applier diff.Applier + Differ diff.Comparer +} + +func NewImageWriter(opt WriterOpt) (*ImageWriter, error) { + return &ImageWriter{opt: opt}, nil +} + +type ImageWriter struct { + opt WriterOpt +} + +func (ic *ImageWriter) Commit(ctx context.Context, inp exporter.Source, oci bool, refCfg cacheconfig.RefConfig, buildInfo bool, buildInfoAttrs bool, sessionID string) (*ocispecs.Descriptor, error) { + platformsBytes, ok := inp.Metadata[exptypes.ExporterPlatformsKey] + + if len(inp.Refs) > 0 && !ok { + return nil, errors.Errorf("unable to export multiple refs, missing platforms mapping") + } + + if len(inp.Refs) == 0 { + remotes, err := ic.exportLayers(ctx, refCfg, session.NewGroup(sessionID), inp.Ref) + if err != nil { + return nil, err + } + + var dtbi []byte + if buildInfo { + if dtbi, err = buildinfo.Format(inp.Metadata[exptypes.ExporterBuildInfo], buildinfo.FormatOpts{ + RemoveAttrs: !buildInfoAttrs, + }); err != nil { + return nil, err + } + } + + mfstDesc, configDesc, err 
:= ic.commitDistributionManifest(ctx, inp.Ref, inp.Metadata[exptypes.ExporterImageConfigKey], &remotes[0], oci, inp.Metadata[exptypes.ExporterInlineCache], dtbi) + if err != nil { + return nil, err + } + if mfstDesc.Annotations == nil { + mfstDesc.Annotations = make(map[string]string) + } + mfstDesc.Annotations[exptypes.ExporterConfigDigestKey] = configDesc.Digest.String() + + return mfstDesc, nil + } + + var p exptypes.Platforms + if err := json.Unmarshal(platformsBytes, &p); err != nil { + return nil, errors.Wrapf(err, "failed to parse platforms passed to exporter") + } + + if len(p.Platforms) != len(inp.Refs) { + return nil, errors.Errorf("number of platforms does not match references %d %d", len(p.Platforms), len(inp.Refs)) + } + + refs := make([]cache.ImmutableRef, 0, len(inp.Refs)) + remotesMap := make(map[string]int, len(inp.Refs)) + for id, r := range inp.Refs { + remotesMap[id] = len(refs) + refs = append(refs, r) + } + + remotes, err := ic.exportLayers(ctx, refCfg, session.NewGroup(sessionID), refs...) + if err != nil { + return nil, err + } + + idx := struct { + // MediaType is reserved in the OCI spec but + // excluded from go types. 
+ MediaType string `json:"mediaType,omitempty"` + + ocispecs.Index + }{ + MediaType: ocispecs.MediaTypeImageIndex, + Index: ocispecs.Index{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + }, + } + + if !oci { + idx.MediaType = images.MediaTypeDockerSchema2ManifestList + } + + labels := map[string]string{} + + for i, p := range p.Platforms { + r, ok := inp.Refs[p.ID] + if !ok { + return nil, errors.Errorf("failed to find ref for ID %s", p.ID) + } + config := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, p.ID)] + inlineCache := inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, p.ID)] + + var dtbi []byte + if buildInfo { + if dtbi, err = buildinfo.Format(inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p.ID)], buildinfo.FormatOpts{ + RemoveAttrs: !buildInfoAttrs, + }); err != nil { + return nil, err + } + } + + desc, _, err := ic.commitDistributionManifest(ctx, r, config, &remotes[remotesMap[p.ID]], oci, inlineCache, dtbi) + if err != nil { + return nil, err + } + dp := p.Platform + desc.Platform = &dp + idx.Manifests = append(idx.Manifests, *desc) + + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = desc.Digest.String() + } + + idxBytes, err := json.MarshalIndent(idx, "", " ") + if err != nil { + return nil, errors.Wrap(err, "failed to marshal index") + } + + idxDigest := digest.FromBytes(idxBytes) + idxDesc := ocispecs.Descriptor{ + Digest: idxDigest, + Size: int64(len(idxBytes)), + MediaType: idx.MediaType, + } + idxDone := oneOffProgress(ctx, "exporting manifest list "+idxDigest.String()) + + if err := content.WriteBlob(ctx, ic.opt.ContentStore, idxDigest.String(), bytes.NewReader(idxBytes), idxDesc, content.WithLabels(labels)); err != nil { + return nil, idxDone(errors.Wrapf(err, "error writing manifest list blob %s", idxDigest)) + } + idxDone(nil) + + return &idxDesc, nil +} + +func (ic *ImageWriter) exportLayers(ctx context.Context, refCfg cacheconfig.RefConfig, s session.Group, refs 
...cache.ImmutableRef) ([]solver.Remote, error) { + attr := []attribute.KeyValue{ + attribute.String("exportLayers.compressionType", refCfg.Compression.Type.String()), + attribute.Bool("exportLayers.forceCompression", refCfg.Compression.Force), + } + if refCfg.Compression.Level != nil { + attr = append(attr, attribute.Int("exportLayers.compressionLevel", *refCfg.Compression.Level)) + } + span, ctx := tracing.StartSpan(ctx, "export layers", trace.WithAttributes(attr...)) + + eg, ctx := errgroup.WithContext(ctx) + layersDone := oneOffProgress(ctx, "exporting layers") + + out := make([]solver.Remote, len(refs)) + + for i, ref := range refs { + func(i int, ref cache.ImmutableRef) { + if ref == nil { + return + } + eg.Go(func() error { + remotes, err := ref.GetRemotes(ctx, true, refCfg, false, s) + if err != nil { + return err + } + remote := remotes[0] + out[i] = *remote + return nil + }) + }(i, ref) + } + + err := layersDone(eg.Wait()) + tracing.FinishWithError(span, err) + return out, err +} + +func (ic *ImageWriter) commitDistributionManifest(ctx context.Context, ref cache.ImmutableRef, config []byte, remote *solver.Remote, oci bool, inlineCache []byte, buildInfo []byte) (*ocispecs.Descriptor, *ocispecs.Descriptor, error) { + if len(config) == 0 { + var err error + config, err = emptyImageConfig() + if err != nil { + return nil, nil, err + } + } + + if remote == nil { + remote = &solver.Remote{ + Provider: ic.opt.ContentStore, + } + } + + history, err := parseHistoryFromConfig(config) + if err != nil { + return nil, nil, err + } + + remote, history = normalizeLayersAndHistory(ctx, remote, history, ref, oci) + + config, err = patchImageConfig(config, remote.Descriptors, history, inlineCache, buildInfo) + if err != nil { + return nil, nil, err + } + + var ( + configDigest = digest.FromBytes(config) + manifestType = ocispecs.MediaTypeImageManifest + configType = ocispecs.MediaTypeImageConfig + ) + + // Use docker media types for older Docker versions and registries + 
if !oci { + manifestType = images.MediaTypeDockerSchema2Manifest + configType = images.MediaTypeDockerSchema2Config + } + + mfst := struct { + // MediaType is reserved in the OCI spec but + // excluded from go types. + MediaType string `json:"mediaType,omitempty"` + + ocispecs.Manifest + }{ + MediaType: manifestType, + Manifest: ocispecs.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + }, + }, + } + + labels := map[string]string{ + "containerd.io/gc.ref.content.0": configDigest.String(), + } + + for i, desc := range remote.Descriptors { + // oci supports annotations but don't export internal annotations + if oci { + delete(desc.Annotations, "containerd.io/uncompressed") + delete(desc.Annotations, "buildkit/createdat") + for k := range desc.Annotations { + if strings.HasPrefix(k, "containerd.io/distribution.source.") { + delete(desc.Annotations, k) + } + } + } else { + desc.Annotations = nil + } + + mfst.Layers = append(mfst.Layers, desc) + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = desc.Digest.String() + } + + mfstJSON, err := json.MarshalIndent(mfst, "", " ") + if err != nil { + return nil, nil, errors.Wrap(err, "failed to marshal manifest") + } + + mfstDigest := digest.FromBytes(mfstJSON) + mfstDesc := ocispecs.Descriptor{ + Digest: mfstDigest, + Size: int64(len(mfstJSON)), + } + mfstDone := oneOffProgress(ctx, "exporting manifest "+mfstDigest.String()) + + if err := content.WriteBlob(ctx, ic.opt.ContentStore, mfstDigest.String(), bytes.NewReader(mfstJSON), mfstDesc, content.WithLabels((labels))); err != nil { + return nil, nil, mfstDone(errors.Wrapf(err, "error writing manifest blob %s", mfstDigest)) + } + mfstDone(nil) + + configDesc := ocispecs.Descriptor{ + Digest: configDigest, + Size: int64(len(config)), + MediaType: configType, + } + configDone := oneOffProgress(ctx, "exporting config "+configDigest.String()) + + 
	// --- continuation of commitDistributionManifest: write the image config
	// blob, then return the manifest and config descriptors ---
	if err := content.WriteBlob(ctx, ic.opt.ContentStore, configDigest.String(), bytes.NewReader(config), configDesc); err != nil {
		return nil, nil, configDone(errors.Wrap(err, "error writing config blob"))
	}
	configDone(nil)

	return &ocispecs.Descriptor{
		Digest:    mfstDigest,
		Size:      int64(len(mfstJSON)),
		MediaType: manifestType,
	}, &configDesc, nil
}

// ContentStore exposes the underlying content store used for committed blobs.
func (ic *ImageWriter) ContentStore() content.Store {
	return ic.opt.ContentStore
}

// Snapshotter exposes the underlying snapshotter.
func (ic *ImageWriter) Snapshotter() snapshot.Snapshotter {
	return ic.opt.Snapshotter
}

// Applier exposes the underlying diff applier.
func (ic *ImageWriter) Applier() diff.Applier {
	return ic.opt.Applier
}

// emptyImageConfig builds a minimal image config (JSON bytes) for the current
// default platform: layered rootfs, working directory "/", and a PATH entry
// appropriate for the platform's OS. Used when the exporter source carries no
// config of its own.
func emptyImageConfig() ([]byte, error) {
	pl := platforms.Normalize(platforms.DefaultSpec())

	type image struct {
		ocispecs.Image

		// Variant defines platform variant. To be added to OCI.
		Variant string `json:"variant,omitempty"`
	}

	img := image{
		Image: ocispecs.Image{
			Architecture: pl.Architecture,
			OS:           pl.OS,
		},
		Variant: pl.Variant,
	}
	img.RootFS.Type = "layers"
	img.Config.WorkingDir = "/"
	img.Config.Env = []string{"PATH=" + system.DefaultPathEnv(pl.OS)}
	dt, err := json.Marshal(img)
	return dt, errors.Wrap(err, "failed to create empty image config")
}

// parseHistoryFromConfig extracts only the "history" array from an image
// config JSON blob; all other fields are ignored.
func parseHistoryFromConfig(dt []byte) ([]ocispecs.History, error) {
	var config struct {
		History []ocispecs.History
	}
	if err := json.Unmarshal(dt, &config); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal history from config")
	}
	return config.History, nil
}

// patchImageConfig rewrites selected fields (rootfs, history, created, inline
// cache, buildinfo) of an image config. It decodes into
// map[string]json.RawMessage so that fields it does not touch are preserved
// byte-for-byte.
func patchImageConfig(dt []byte, descs []ocispecs.Descriptor, history []ocispecs.History, cache []byte, buildInfo []byte) ([]byte, error) {
	m := map[string]json.RawMessage{}
	if err := json.Unmarshal(dt, &m); err != nil {
		return nil, errors.Wrap(err, "failed to parse image config for patch")
	}

	var rootFS ocispecs.RootFS
	rootFS.Type = "layers"
	// DiffIDs come from the "containerd.io/uncompressed" annotation on each
	// layer descriptor (statement continues on the next span).
	for _, desc := range descs {
		rootFS.DiffIDs = append(rootFS.DiffIDs,
digest.Digest(desc.Annotations["containerd.io/uncompressed"])) + } + dt, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal rootfs") + } + m["rootfs"] = dt + + dt, err = json.Marshal(history) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal history") + } + m["history"] = dt + + if _, ok := m["created"]; !ok { + var tm *time.Time + for _, h := range history { + if h.Created != nil { + tm = h.Created + } + } + dt, err = json.Marshal(&tm) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal creation time") + } + m["created"] = dt + } + + if cache != nil { + dt, err := json.Marshal(cache) + if err != nil { + return nil, err + } + m["moby.buildkit.cache.v0"] = dt + } + + if buildInfo != nil { + dt, err := json.Marshal(buildInfo) + if err != nil { + return nil, err + } + m[binfotypes.ImageConfigField] = dt + } else if _, ok := m[binfotypes.ImageConfigField]; ok { + delete(m, binfotypes.ImageConfigField) + } + + dt, err = json.Marshal(m) + return dt, errors.Wrap(err, "failed to marshal config after patch") +} + +func normalizeLayersAndHistory(ctx context.Context, remote *solver.Remote, history []ocispecs.History, ref cache.ImmutableRef, oci bool) (*solver.Remote, []ocispecs.History) { + refMeta := getRefMetadata(ref, len(remote.Descriptors)) + + var historyLayers int + for _, h := range history { + if !h.EmptyLayer { + historyLayers++ + } + } + + if historyLayers > len(remote.Descriptors) { + // this case shouldn't happen but if it does force set history layers empty + // from the bottom + bklog.G(ctx).Warn("invalid image config with unaccounted layers") + historyCopy := make([]ocispecs.History, 0, len(history)) + var l int + for _, h := range history { + if l >= len(remote.Descriptors) { + h.EmptyLayer = true + } + if !h.EmptyLayer { + l++ + } + historyCopy = append(historyCopy, h) + } + history = historyCopy + } + + if len(remote.Descriptors) > historyLayers { + // some history items are 
missing. add them based on the ref metadata + for _, md := range refMeta[historyLayers:] { + history = append(history, ocispecs.History{ + Created: md.createdAt, + CreatedBy: md.description, + Comment: "buildkit.exporter.image.v0", + }) + } + } + + var layerIndex int + for i, h := range history { + if !h.EmptyLayer { + if h.Created == nil { + h.Created = refMeta[layerIndex].createdAt + } + layerIndex++ + } + history[i] = h + } + + // Find the first new layer time. Otherwise, the history item for a first + // metadata command would be the creation time of a base image layer. + // If there is no such then the last layer with timestamp. + var created *time.Time + var noCreatedTime bool + for _, h := range history { + if h.Created != nil { + created = h.Created + if noCreatedTime { + break + } + } else { + noCreatedTime = true + } + } + + // Fill in created times for all history items to be either the first new + // layer time or the previous layer. + noCreatedTime = false + for i, h := range history { + if h.Created != nil { + if noCreatedTime { + created = h.Created + } + } else { + noCreatedTime = true + h.Created = created + } + history[i] = h + } + + // convert between oci and docker media types (or vice versa) if needed + remote.Descriptors = compression.ConvertAllLayerMediaTypes(oci, remote.Descriptors...) 

	// --- continuation of normalizeLayersAndHistory ---
	return remote, history
}

// refMetadata carries the per-layer description and creation time mined from
// a cache ref's layer chain.
type refMetadata struct {
	description string
	createdAt   *time.Time
}

// getRefMetadata returns exactly `limit` metadata entries for the newest
// `limit` layers of ref's layer chain. A nil ref yields `limit` zero-valued
// entries; a missing description falls back to a placeholder rather than
// failing the build.
func getRefMetadata(ref cache.ImmutableRef, limit int) []refMetadata {
	if ref == nil {
		return make([]refMetadata, limit)
	}

	layerChain := ref.LayerChain()
	defer layerChain.Release(context.TODO())

	// Keep only the newest `limit` layers.
	if limit < len(layerChain) {
		layerChain = layerChain[len(layerChain)-limit:]
	}

	metas := make([]refMetadata, len(layerChain))
	for i, layer := range layerChain {
		meta := &metas[i]

		if description := layer.GetDescription(); description != "" {
			meta.description = description
		} else {
			meta.description = "created by buildkit" // shouldn't be shown but don't fail build
		}

		createdAt := layer.GetCreatedAt()
		meta.createdAt = &createdAt
	}
	return metas
}

// oneOffProgress emits a "started" progress status for id immediately and
// returns a closure that marks it completed (passing through err) when the
// operation finishes.
func oneOffProgress(ctx context.Context, id string) func(err error) error {
	pw, _, _ := progress.NewFromContext(ctx)
	now := time.Now()
	st := progress.Status{
		Started: &now,
	}
	pw.Write(id, st)
	return func(err error) error {
		// TODO: set error on status
		now := time.Now()
		st.Completed = &now
		pw.Write(id, st)
		pw.Close()
		return err
	}
}
diff --git a/vendor/github.com/moby/buildkit/exporter/oci/export.go b/vendor/github.com/moby/buildkit/exporter/oci/export.go
new file mode 100644
index 0000000000000..153211c9b709a
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/exporter/oci/export.go
@@ -0,0 +1,365 @@
package oci

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"strconv"
	"strings"
	"time"

	archiveexporter "github.com/containerd/containerd/images/archive"
	"github.com/containerd/containerd/leases"
	"github.com/docker/distribution/reference"
	"github.com/moby/buildkit/cache"
	cacheconfig "github.com/moby/buildkit/cache/config"
	"github.com/moby/buildkit/exporter"
	"github.com/moby/buildkit/exporter/containerimage"
	"github.com/moby/buildkit/exporter/containerimage/exptypes"
	"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/filesync" + "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/grpcerrors" + "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/progress" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" +) + +type ExporterVariant string + +const ( + keyImageName = "name" + keyLayerCompression = "compression" + VariantOCI = "oci" + VariantDocker = "docker" + ociTypes = "oci-mediatypes" + keyForceCompression = "force-compression" + keyCompressionLevel = "compression-level" + keyBuildInfo = "buildinfo" + keyBuildInfoAttrs = "buildinfo-attrs" + // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was + // already found to use a non-distributable media type. + // When this option is not set, the exporter will change the media type of the layer to a distributable one. 
+ preferNondistLayersKey = "prefer-nondist-layers" +) + +type Opt struct { + SessionManager *session.Manager + ImageWriter *containerimage.ImageWriter + Variant ExporterVariant + LeaseManager leases.Manager +} + +type imageExporter struct { + opt Opt +} + +func New(opt Opt) (exporter.Exporter, error) { + im := &imageExporter{opt: opt} + return im, nil +} + +func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { + var ot *bool + i := &imageExporterInstance{ + imageExporter: e, + layerCompression: compression.Default, + buildInfo: true, + } + var esgz bool + for k, v := range opt { + switch k { + case keyImageName: + i.name = v + case keyLayerCompression: + switch v { + case "gzip": + i.layerCompression = compression.Gzip + case "estargz": + i.layerCompression = compression.EStargz + esgz = true + case "zstd": + i.layerCompression = compression.Zstd + case "uncompressed": + i.layerCompression = compression.Uncompressed + default: + return nil, errors.Errorf("unsupported layer compression type: %v", v) + } + case keyForceCompression: + if v == "" { + i.forceCompression = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value %v specified for %s", v, k) + } + i.forceCompression = b + case keyCompressionLevel: + ii, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "non-int value %s specified for %s", v, k) + } + v := int(ii) + i.compressionLevel = &v + case ociTypes: + ot = new(bool) + if v == "" { + *ot = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + *ot = b + case keyBuildInfo: + if v == "" { + i.buildInfo = true + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.buildInfo = b + case keyBuildInfoAttrs: + if v == "" { + 
i.buildInfoAttrs = false + continue + } + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.buildInfoAttrs = b + case preferNondistLayersKey: + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value specified for %s", k) + } + i.preferNonDist = b + default: + if i.meta == nil { + i.meta = make(map[string][]byte) + } + i.meta[k] = []byte(v) + } + } + if ot == nil { + i.ociTypes = e.opt.Variant == VariantOCI + } else { + i.ociTypes = *ot + } + if esgz && !i.ociTypes { + logrus.Warn("forcibly turning on oci-mediatype mode for estargz") + i.ociTypes = true + } + return i, nil +} + +type imageExporterInstance struct { + *imageExporter + meta map[string][]byte + name string + ociTypes bool + layerCompression compression.Type + forceCompression bool + compressionLevel *int + buildInfo bool + buildInfoAttrs bool + preferNonDist bool +} + +func (e *imageExporterInstance) Name() string { + return "exporting to oci image format" +} + +func (e *imageExporterInstance) Config() exporter.Config { + return exporter.Config{ + Compression: e.compression(), + } +} + +func (e *imageExporterInstance) compression() compression.Config { + c := compression.New(e.layerCompression).SetForce(e.forceCompression) + if e.compressionLevel != nil { + c = c.SetLevel(*e.compressionLevel) + } + return c +} + +func (e *imageExporterInstance) refCfg() cacheconfig.RefConfig { + return cacheconfig.RefConfig{ + Compression: e.compression(), + PreferNonDistributable: e.preferNonDist, + } +} + +func (e *imageExporterInstance) Export(ctx context.Context, src exporter.Source, sessionID string) (map[string]string, error) { + if e.opt.Variant == VariantDocker && len(src.Refs) > 0 { + return nil, errors.Errorf("docker exporter does not currently support exporting manifest lists") + } + + if src.Metadata == nil { + src.Metadata = make(map[string][]byte) + } + for k, v := range e.meta { + 
src.Metadata[k] = v + } + + ctx, done, err := leaseutil.WithLease(ctx, e.opt.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + defer done(context.TODO()) + + desc, err := e.opt.ImageWriter.Commit(ctx, src, e.ociTypes, e.refCfg(), e.buildInfo, e.buildInfoAttrs, sessionID) + if err != nil { + return nil, err + } + defer func() { + e.opt.ImageWriter.ContentStore().Delete(context.TODO(), desc.Digest) + }() + + if desc.Annotations == nil { + desc.Annotations = map[string]string{} + } + desc.Annotations[ocispecs.AnnotationCreated] = time.Now().UTC().Format(time.RFC3339) + + resp := make(map[string]string) + + resp[exptypes.ExporterImageDigestKey] = desc.Digest.String() + if v, ok := desc.Annotations[exptypes.ExporterConfigDigestKey]; ok { + resp[exptypes.ExporterImageConfigDigestKey] = v + delete(desc.Annotations, exptypes.ExporterConfigDigestKey) + } + + dtdesc, err := json.Marshal(desc) + if err != nil { + return nil, err + } + resp[exptypes.ExporterImageDescriptorKey] = base64.StdEncoding.EncodeToString(dtdesc) + + if n, ok := src.Metadata["image.name"]; e.name == "*" && ok { + e.name = string(n) + } + + names, err := normalizedNames(e.name) + if err != nil { + return nil, err + } + + if len(names) != 0 { + resp["image.name"] = strings.Join(names, ",") + } + + expOpts := []archiveexporter.ExportOpt{archiveexporter.WithManifest(*desc, names...)} + switch e.opt.Variant { + case VariantOCI: + expOpts = append(expOpts, archiveexporter.WithAllPlatforms(), archiveexporter.WithSkipDockerManifest()) + case VariantDocker: + default: + return nil, errors.Errorf("invalid variant %q", e.opt.Variant) + } + + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + caller, err := e.opt.SessionManager.Get(timeoutCtx, sessionID, false) + if err != nil { + return nil, err + } + + w, err := filesync.CopyFileWriter(ctx, resp, caller) + if err != nil { + return nil, err + } + + mprovider := 
contentutil.NewMultiProvider(e.opt.ImageWriter.ContentStore()) + if src.Ref != nil { + remotes, err := src.Ref.GetRemotes(ctx, false, e.refCfg(), false, session.NewGroup(sessionID)) + if err != nil { + return nil, err + } + remote := remotes[0] + // unlazy before tar export as the tar writer does not handle + // layer blobs in parallel (whereas unlazy does) + if unlazier, ok := remote.Provider.(cache.Unlazier); ok { + if err := unlazier.Unlazy(ctx); err != nil { + return nil, err + } + } + for _, desc := range remote.Descriptors { + mprovider.Add(desc.Digest, remote.Provider) + } + } + if len(src.Refs) > 0 { + for _, r := range src.Refs { + remotes, err := r.GetRemotes(ctx, false, e.refCfg(), false, session.NewGroup(sessionID)) + if err != nil { + return nil, err + } + remote := remotes[0] + if unlazier, ok := remote.Provider.(cache.Unlazier); ok { + if err := unlazier.Unlazy(ctx); err != nil { + return nil, err + } + } + for _, desc := range remote.Descriptors { + mprovider.Add(desc.Digest, remote.Provider) + } + } + } + + report := oneOffProgress(ctx, "sending tarball") + if err := archiveexporter.Export(ctx, mprovider, w, expOpts...); err != nil { + w.Close() + if grpcerrors.Code(err) == codes.AlreadyExists { + return resp, report(nil) + } + return nil, report(err) + } + err = w.Close() + if grpcerrors.Code(err) == codes.AlreadyExists { + return resp, report(nil) + } + return resp, report(err) +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.NewFromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} + +func normalizedNames(name string) ([]string, error) { + if name == "" { + return nil, nil + } + names := strings.Split(name, ",") + var tagNames = make([]string, len(names)) + for i, name := range names { + 
parsed, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", name) + } + tagNames[i] = reference.TagNameOnly(parsed).String() + } + return tagNames, nil +} diff --git a/vendor/github.com/moby/buildkit/snapshot/imagerefchecker/checker.go b/vendor/github.com/moby/buildkit/snapshot/imagerefchecker/checker.go new file mode 100644 index 0000000000000..eb6cb25f32636 --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/imagerefchecker/checker.go @@ -0,0 +1,129 @@ +package imagerefchecker + +import ( + "context" + "encoding/json" + "strings" + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/moby/buildkit/cache" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type Opt struct { + ImageStore images.Store + ContentStore content.Store +} + +// New creates new image reference checker that can be used to see if a reference +// is being used by any of the images in the image store +func New(opt Opt) cache.ExternalRefCheckerFunc { + return func() (cache.ExternalRefChecker, error) { + return &Checker{opt: opt}, nil + } +} + +type Checker struct { + opt Opt + once sync.Once + images map[string]struct{} + cache map[string]bool +} + +func (c *Checker) Exists(key string, blobs []digest.Digest) bool { + if c.opt.ImageStore == nil { + return false + } + + c.once.Do(c.init) + + if b, ok := c.cache[key]; ok { + return b + } + + _, ok := c.images[layerKey(blobs)] + c.cache[key] = ok + return ok +} + +func (c *Checker) init() { + c.images = map[string]struct{}{} + c.cache = map[string]bool{} + + imgs, err := c.opt.ImageStore.List(context.TODO()) + if err != nil { + return + } + + var mu sync.Mutex + + for _, img := range imgs { + if err := images.Dispatch(context.TODO(), images.Handlers(layersHandler(c.opt.ContentStore, func(layers []ocispecs.Descriptor) { + mu.Lock() 
+ c.registerLayers(layers) + mu.Unlock() + })), nil, img.Target); err != nil { + return + } + } +} + +func (c *Checker) registerLayers(l []ocispecs.Descriptor) { + if k := layerKey(toDigests(l)); k != "" { + c.images[k] = struct{}{} + } +} + +func toDigests(layers []ocispecs.Descriptor) []digest.Digest { + digests := make([]digest.Digest, len(layers)) + for i, l := range layers { + digests[i] = l.Digest + } + return digests +} + +func layerKey(layers []digest.Digest) string { + b := &strings.Builder{} + for _, l := range layers { + b.Write([]byte(l)) + } + return b.String() +} + +func layersHandler(provider content.Provider, f func([]ocispecs.Descriptor)) images.HandlerFunc { + return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, nil + } + + var manifest ocispecs.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + f(manifest.Layers) + return nil, nil + case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, nil + } + + var index ocispecs.Index + if err := json.Unmarshal(p, &index); err != nil { + return nil, err + } + + return index.Manifests, nil + default: + return nil, errors.Errorf("encountered unknown type %v", desc.MediaType) + } + } +} diff --git a/vendor/github.com/moby/buildkit/source/containerimage/pull.go b/vendor/github.com/moby/buildkit/source/containerimage/pull.go new file mode 100644 index 0000000000000..a989f0e1e9d4f --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/containerimage/pull.go @@ -0,0 +1,366 @@ +package containerimage + +import ( + "context" + "encoding/json" + "runtime" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" 
+ containerderrdefs "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/containerd/snapshots" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/errdefs" + "github.com/moby/buildkit/source" + srctypes "github.com/moby/buildkit/source/types" + "github.com/moby/buildkit/util/estargz" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/progress/controller" + "github.com/moby/buildkit/util/pull" + "github.com/moby/buildkit/util/resolver" + digest "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// TODO: break apart containerd specifics like contentstore so the resolver +// code can be used with any implementation + +type SourceOpt struct { + Snapshotter snapshot.Snapshotter + ContentStore content.Store + Applier diff.Applier + CacheAccessor cache.Accessor + ImageStore images.Store // optional + RegistryHosts docker.RegistryHosts + LeaseManager leases.Manager +} + +type Source struct { + SourceOpt + g flightcontrol.Group +} + +var _ source.Source = &Source{} + +func NewSource(opt SourceOpt) (*Source, error) { + is := &Source{ + SourceOpt: opt, + } + + return is, nil +} + +func (is *Source) ID() string { + return srctypes.DockerImageScheme +} + +func (is *Source) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) { + 
type t struct { + dgst digest.Digest + dt []byte + } + key := ref + if platform := opt.Platform; platform != nil { + key += platforms.Format(*platform) + } + + rm, err := source.ParseImageResolveMode(opt.ResolveMode) + if err != nil { + return "", nil, err + } + key += rm.String() + + res, err := is.g.Do(ctx, key, func(ctx context.Context) (interface{}, error) { + res := resolver.DefaultPool.GetResolver(is.RegistryHosts, ref, "pull", sm, g).WithImageStore(is.ImageStore, rm) + dgst, dt, err := imageutil.Config(ctx, ref, res, is.ContentStore, is.LeaseManager, opt.Platform) + if err != nil { + return nil, err + } + return &t{dgst: dgst, dt: dt}, nil + }) + if err != nil { + return "", nil, err + } + typed := res.(*t) + return typed.dgst, typed.dt, nil +} + +func (is *Source) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, vtx solver.Vertex) (source.SourceInstance, error) { + imageIdentifier, ok := id.(*source.ImageIdentifier) + if !ok { + return nil, errors.Errorf("invalid image identifier %v", id) + } + + platform := platforms.DefaultSpec() + if imageIdentifier.Platform != nil { + platform = *imageIdentifier.Platform + } + + pullerUtil := &pull.Puller{ + ContentStore: is.ContentStore, + Platform: platform, + Src: imageIdentifier.Reference, + } + p := &puller{ + CacheAccessor: is.CacheAccessor, + LeaseManager: is.LeaseManager, + Puller: pullerUtil, + id: imageIdentifier, + RegistryHosts: is.RegistryHosts, + ImageStore: is.ImageStore, + Mode: imageIdentifier.ResolveMode, + Ref: imageIdentifier.Reference.String(), + SessionManager: sm, + vtx: vtx, + } + return p, nil +} + +type puller struct { + CacheAccessor cache.Accessor + LeaseManager leases.Manager + RegistryHosts docker.RegistryHosts + ImageStore images.Store + Mode source.ResolveMode + Ref string + SessionManager *session.Manager + id *source.ImageIdentifier + vtx solver.Vertex + + g flightcontrol.Group + cacheKeyErr error + cacheKeyDone bool + releaseTmpLeases func(context.Context) error 
+ descHandlers cache.DescHandlers + manifest *pull.PulledManifests + manifestKey string + configKey string + *pull.Puller +} + +func mainManifestKey(ctx context.Context, desc ocispecs.Descriptor, platform ocispecs.Platform) (digest.Digest, error) { + dt, err := json.Marshal(struct { + Digest digest.Digest + OS string + Arch string + Variant string `json:",omitempty"` + }{ + Digest: desc.Digest, + OS: platform.OS, + Arch: platform.Architecture, + Variant: platform.Variant, + }) + if err != nil { + return "", err + } + return digest.FromBytes(dt), nil +} + +func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (cacheKey string, imgDigest string, cacheOpts solver.CacheOpts, cacheDone bool, err error) { + p.Puller.Resolver = resolver.DefaultPool.GetResolver(p.RegistryHosts, p.Ref, "pull", p.SessionManager, g).WithImageStore(p.ImageStore, p.id.ResolveMode) + + // progressFactory needs the outer context, the context in `p.g.Do` will + // be canceled before the progress output is complete + progressFactory := progress.FromContext(ctx) + + _, err = p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) { + if p.cacheKeyErr != nil || p.cacheKeyDone == true { + return nil, p.cacheKeyErr + } + defer func() { + if !errdefs.IsCanceled(err) { + p.cacheKeyErr = err + } + }() + ctx, done, err := leaseutil.WithLease(ctx, p.LeaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + p.releaseTmpLeases = done + defer imageutil.AddLease(done) + + resolveProgressDone := oneOffProgress(ctx, "resolve "+p.Src.String()) + defer func() { + resolveProgressDone(err) + }() + + p.manifest, err = p.PullManifests(ctx) + if err != nil { + return nil, err + } + + if len(p.manifest.Descriptors) > 0 { + progressController := &controller.Controller{ + WriterFactory: progressFactory, + } + if p.vtx != nil { + progressController.Digest = p.vtx.Digest() + progressController.Name = p.vtx.Name() + 
progressController.ProgressGroup = p.vtx.Options().ProgressGroup + } + + p.descHandlers = cache.DescHandlers(make(map[digest.Digest]*cache.DescHandler)) + for i, desc := range p.manifest.Descriptors { + labels := snapshots.FilterInheritedLabels(desc.Annotations) + if labels == nil { + labels = make(map[string]string) + } + for k, v := range estargz.SnapshotLabels(p.manifest.Ref, p.manifest.Descriptors, i) { + labels[k] = v + } + p.descHandlers[desc.Digest] = &cache.DescHandler{ + Provider: p.manifest.Provider, + Progress: progressController, + SnapshotLabels: labels, + Annotations: desc.Annotations, + Ref: p.manifest.Ref, + } + } + } + + desc := p.manifest.MainManifestDesc + k, err := mainManifestKey(ctx, desc, p.Platform) + if err != nil { + return nil, err + } + p.manifestKey = k.String() + + dt, err := content.ReadBlob(ctx, p.ContentStore, p.manifest.ConfigDesc) + if err != nil { + return nil, err + } + p.configKey = cacheKeyFromConfig(dt).String() + p.cacheKeyDone = true + return nil, nil + }) + if err != nil { + return "", "", nil, false, err + } + + cacheOpts = solver.CacheOpts(make(map[interface{}]interface{})) + for dgst, descHandler := range p.descHandlers { + cacheOpts[cache.DescHandlerKey(dgst)] = descHandler + } + + cacheDone = index > 0 + if index == 0 || p.configKey == "" { + return p.manifestKey, p.manifest.MainManifestDesc.Digest.String(), cacheOpts, cacheDone, nil + } + return p.configKey, p.manifest.MainManifestDesc.Digest.String(), cacheOpts, cacheDone, nil +} + +func (p *puller) Snapshot(ctx context.Context, g session.Group) (ir cache.ImmutableRef, err error) { + p.Puller.Resolver = resolver.DefaultPool.GetResolver(p.RegistryHosts, p.Ref, "pull", p.SessionManager, g).WithImageStore(p.ImageStore, p.id.ResolveMode) + + if len(p.manifest.Descriptors) == 0 { + return nil, nil + } + defer func() { + if p.releaseTmpLeases != nil { + p.releaseTmpLeases(context.TODO()) + } + }() + + var current cache.ImmutableRef + defer func() { + if err != nil && 
current != nil { + current.Release(context.TODO()) + } + }() + + var parent cache.ImmutableRef + setWindowsLayerType := p.Platform.OS == "windows" && runtime.GOOS != "windows" + for _, layerDesc := range p.manifest.Descriptors { + parent = current + current, err = p.CacheAccessor.GetByBlob(ctx, layerDesc, parent, + p.descHandlers, cache.WithImageRef(p.manifest.Ref)) + if parent != nil { + parent.Release(context.TODO()) + } + if err != nil { + return nil, err + } + if setWindowsLayerType { + if err := current.SetLayerType("windows"); err != nil { + return nil, err + } + } + } + + for _, desc := range p.manifest.Nonlayers { + if _, err := p.ContentStore.Info(ctx, desc.Digest); containerderrdefs.IsNotFound(err) { + // manifest or config must have gotten gc'd after CacheKey, re-pull them + ctx, done, err := leaseutil.WithLease(ctx, p.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return nil, err + } + defer done(ctx) + + if _, err := p.PullManifests(ctx); err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + + if err := p.LeaseManager.AddResource(ctx, leases.Lease{ID: current.ID()}, leases.Resource{ + ID: desc.Digest.String(), + Type: "content", + }); err != nil { + return nil, err + } + } + + if p.id.RecordType != "" && current.GetRecordType() == "" { + if err := current.SetRecordType(p.id.RecordType); err != nil { + return nil, err + } + } + + return current, nil +} + +// cacheKeyFromConfig returns a stable digest from image config. If image config +// is a known oci image we will use chainID of layers. 
+func cacheKeyFromConfig(dt []byte) digest.Digest { + var img ocispecs.Image + err := json.Unmarshal(dt, &img) + if err != nil { + return digest.FromBytes(dt) + } + if img.RootFS.Type != "layers" || len(img.RootFS.DiffIDs) == 0 { + return "" + } + return identity.ChainID(img.RootFS.DiffIDs) +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.NewFromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s b/vendor/github.com/moby/buildkit/util/network/cniprovider/allowempty.s new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go new file mode 100644 index 0000000000000..8ff4cad601cee --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni.go @@ -0,0 +1,106 @@ +package cniprovider + +import ( + "context" + "os" + "runtime" + + cni "github.com/containerd/go-cni" + "github.com/gofrs/flock" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/network" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +type Opt struct { + Root string + ConfigPath string + BinaryDir string +} + +func New(opt Opt) (network.Provider, error) { + if _, err := os.Stat(opt.ConfigPath); err != nil { + return nil, errors.Wrapf(err, "failed to read cni config %q", opt.ConfigPath) + } + if _, err := os.Stat(opt.BinaryDir); err != nil { + return nil, errors.Wrapf(err, "failed to read cni binary dir %q", opt.BinaryDir) + } + + cniOptions := []cni.Opt{cni.WithPluginDir([]string{opt.BinaryDir}), cni.WithInterfacePrefix("eth")} + + // Windows doesn't 
use CNI for loopback. + if runtime.GOOS != "windows" { + cniOptions = append([]cni.Opt{cni.WithMinNetworkCount(2)}, cniOptions...) + cniOptions = append(cniOptions, cni.WithLoNetwork) + } + + cniOptions = append(cniOptions, cni.WithConfFile(opt.ConfigPath)) + + cniHandle, err := cni.New(cniOptions...) + if err != nil { + return nil, err + } + + cp := &cniProvider{CNI: cniHandle, root: opt.Root} + if err := cp.initNetwork(); err != nil { + return nil, err + } + return cp, nil +} + +type cniProvider struct { + cni.CNI + root string +} + +func (c *cniProvider) initNetwork() error { + if v := os.Getenv("BUILDKIT_CNI_INIT_LOCK_PATH"); v != "" { + l := flock.New(v) + if err := l.Lock(); err != nil { + return err + } + defer l.Unlock() + } + ns, err := c.New() + if err != nil { + return err + } + return ns.Close() +} + +func (c *cniProvider) New() (network.Namespace, error) { + id := identity.NewID() + nativeID, err := createNetNS(c, id) + if err != nil { + return nil, err + } + + if _, err := c.CNI.Setup(context.TODO(), id, nativeID); err != nil { + deleteNetNS(nativeID) + return nil, errors.Wrap(err, "CNI setup error") + } + + return &cniNS{nativeID: nativeID, id: id, handle: c.CNI}, nil +} + +type cniNS struct { + handle cni.CNI + id string + nativeID string +} + +func (ns *cniNS) Set(s *specs.Spec) error { + return setNetNS(s, ns.nativeID) +} + +func (ns *cniNS) Close() error { + err := ns.handle.Remove(context.TODO(), ns.id, ns.nativeID) + if err1 := unmountNetNS(ns.nativeID); err1 != nil && err == nil { + err = err1 + } + if err1 := deleteNetNS(ns.nativeID); err1 != nil && err == nil { + err = err1 + } + return err +} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go new file mode 100644 index 0000000000000..eb6dcacefce55 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/cni_unsafe.go @@ -0,0 +1,17 @@ +//go:build linux +// +build 
linux + +package cniprovider + +import ( + _ "unsafe" // required for go:linkname. +) + +//go:linkname beforeFork syscall.runtime_BeforeFork +func beforeFork() + +//go:linkname afterFork syscall.runtime_AfterFork +func afterFork() + +//go:linkname afterForkInChild syscall.runtime_AfterForkInChild +func afterForkInChild() diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go new file mode 100644 index 0000000000000..f1138a9fd5b3d --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_linux.go @@ -0,0 +1,98 @@ +//go:build linux +// +build linux + +package cniprovider + +import ( + "os" + "path/filepath" + "syscall" + "unsafe" + + "github.com/containerd/containerd/oci" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func createNetNS(c *cniProvider, id string) (string, error) { + nsPath := filepath.Join(c.root, "net/cni", id) + if err := os.MkdirAll(filepath.Dir(nsPath), 0700); err != nil { + return "", err + } + + f, err := os.Create(nsPath) + if err != nil { + deleteNetNS(nsPath) + return "", err + } + if err := f.Close(); err != nil { + deleteNetNS(nsPath) + return "", err + } + procNetNSBytes, err := syscall.BytePtrFromString("/proc/self/ns/net") + if err != nil { + deleteNetNS(nsPath) + return "", err + } + nsPathBytes, err := syscall.BytePtrFromString(nsPath) + if err != nil { + deleteNetNS(nsPath) + return "", err + } + beforeFork() + + pid, _, errno := syscall.RawSyscall6(syscall.SYS_CLONE, uintptr(syscall.SIGCHLD)|unix.CLONE_NEWNET, 0, 0, 0, 0, 0) + if errno != 0 { + afterFork() + deleteNetNS(nsPath) + return "", errno + } + + if pid != 0 { + afterFork() + var ws unix.WaitStatus + _, err = unix.Wait4(int(pid), &ws, 0, nil) + for err == syscall.EINTR { + _, err = unix.Wait4(int(pid), &ws, 0, nil) + } + + if err != nil { + deleteNetNS(nsPath) + return "", 
errors.Wrapf(err, "failed to find pid=%d process", pid) + } + errno = syscall.Errno(ws.ExitStatus()) + if errno != 0 { + deleteNetNS(nsPath) + return "", errors.Wrapf(errno, "failed to mount %s (pid=%d)", nsPath, pid) + } + return nsPath, nil + } + afterForkInChild() + _, _, errno = syscall.RawSyscall6(syscall.SYS_MOUNT, uintptr(unsafe.Pointer(procNetNSBytes)), uintptr(unsafe.Pointer(nsPathBytes)), 0, uintptr(unix.MS_BIND), 0, 0) + syscall.RawSyscall(syscall.SYS_EXIT, uintptr(errno), 0, 0) + panic("unreachable") +} + +func setNetNS(s *specs.Spec, nsPath string) error { + return oci.WithLinuxNamespace(specs.LinuxNamespace{ + Type: specs.NetworkNamespace, + Path: nsPath, + })(nil, nil, nil, s) +} + +func unmountNetNS(nsPath string) error { + if err := unix.Unmount(nsPath, unix.MNT_DETACH); err != nil { + if err != syscall.EINVAL && err != syscall.ENOENT { + return errors.Wrap(err, "error unmounting network namespace") + } + } + return nil +} + +func deleteNetNS(nsPath string) error { + if err := os.Remove(nsPath); err != nil && !errors.Is(err, os.ErrNotExist) { + return errors.Wrapf(err, "error removing network namespace %s", nsPath) + } + return nil +} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_unix.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_unix.go new file mode 100644 index 0000000000000..6aa4e00c56e27 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_unix.go @@ -0,0 +1,25 @@ +//go:build !linux && !windows +// +build !linux,!windows + +package cniprovider + +import ( + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func createNetNS(c *cniProvider, id string) (string, error) { + return "", errors.New("creating netns for cni not supported") +} + +func setNetNS(s *specs.Spec, nativeID string) error { + return errors.New("enabling netns for cni not supported") +} + +func unmountNetNS(nativeID string) error { + return 
errors.New("unmounting netns for cni not supported") +} + +func deleteNetNS(nativeID string) error { + return errors.New("deleting netns for cni not supported") +} diff --git a/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_windows.go b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_windows.go new file mode 100644 index 0000000000000..7a0cc2d272abc --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/cniprovider/createns_windows.go @@ -0,0 +1,49 @@ +//go:build windows +// +build windows + +package cniprovider + +import ( + "github.com/Microsoft/hcsshim/hcn" + specs "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" +) + +func createNetNS(_ *cniProvider, id string) (string, error) { + nsTemplate := hcn.NewNamespace(hcn.NamespaceTypeGuest) + ns, err := nsTemplate.Create() + if err != nil { + return "", errors.Wrapf(err, "HostComputeNamespace.Create failed for %s", nsTemplate.Id) + } + + return ns.Id, nil +} + +func setNetNS(s *specs.Spec, nativeID string) error { + // Containerd doesn't have a wrapper for this. Code based on oci.WithLinuxNamespace and + // https://github.com/opencontainers/runtime-tools/blob/07406c5828aaf93f60d2aad770312d736811a276/generate/generate.go#L1810-L1814 + if s.Windows == nil { + s.Windows = &specs.Windows{} + } + if s.Windows.Network == nil { + s.Windows.Network = &specs.WindowsNetwork{} + } + + s.Windows.Network.NetworkNamespace = nativeID + + return nil +} + +func unmountNetNS(nativeID string) error { + // We don't need to unmount the NS. 
+ return nil +} + +func deleteNetNS(nativeID string) error { + ns, err := hcn.GetNamespaceByID(nativeID) + if err != nil { + return errors.Wrapf(err, "failed to get namespace %s", nativeID) + } + + return ns.Delete() +} diff --git a/vendor/github.com/moby/buildkit/util/network/netproviders/network.go b/vendor/github.com/moby/buildkit/util/network/netproviders/network.go new file mode 100644 index 0000000000000..4265b7b29b5e2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/netproviders/network.go @@ -0,0 +1,61 @@ +package netproviders + +import ( + "os" + + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/network" + "github.com/moby/buildkit/util/network/cniprovider" + "github.com/pkg/errors" +) + +type Opt struct { + CNI cniprovider.Opt + Mode string +} + +// Providers returns the network provider set. +// When opt.Mode is "auto" or "", resolvedMode is set to either "cni" or "host". +func Providers(opt Opt) (providers map[pb.NetMode]network.Provider, resolvedMode string, err error) { + var defaultProvider network.Provider + switch opt.Mode { + case "cni": + cniProvider, err := cniprovider.New(opt.CNI) + if err != nil { + return nil, resolvedMode, err + } + defaultProvider = cniProvider + resolvedMode = opt.Mode + case "host": + hostProvider, ok := getHostProvider() + if !ok { + return nil, resolvedMode, errors.New("no host network support on this platform") + } + defaultProvider = hostProvider + resolvedMode = opt.Mode + case "auto", "": + if _, err := os.Stat(opt.CNI.ConfigPath); err == nil { + cniProvider, err := cniprovider.New(opt.CNI) + if err != nil { + return nil, resolvedMode, err + } + defaultProvider = cniProvider + resolvedMode = "cni" + } else { + defaultProvider, resolvedMode = getFallback() + } + default: + return nil, resolvedMode, errors.Errorf("invalid network mode: %q", opt.Mode) + } + + providers = map[pb.NetMode]network.Provider{ + pb.NetMode_UNSET: defaultProvider, + pb.NetMode_NONE: 
network.NewNoneProvider(), + } + + if hostProvider, ok := getHostProvider(); ok { + providers[pb.NetMode_HOST] = hostProvider + } + + return providers, resolvedMode, nil +} diff --git a/vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go b/vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go new file mode 100644 index 0000000000000..b8d733ec32f05 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/netproviders/network_unix.go @@ -0,0 +1,18 @@ +//go:build !windows +// +build !windows + +package netproviders + +import ( + "github.com/moby/buildkit/util/network" + "github.com/sirupsen/logrus" +) + +func getHostProvider() (network.Provider, bool) { + return network.NewHostProvider(), true +} + +func getFallback() (network.Provider, string) { + logrus.Warn("using host network as the default") + return network.NewHostProvider(), "host" +} diff --git a/vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go b/vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go new file mode 100644 index 0000000000000..c7e460e333a2c --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/network/netproviders/network_windows.go @@ -0,0 +1,18 @@ +//go:build windows +// +build windows + +package netproviders + +import ( + "github.com/moby/buildkit/util/network" + "github.com/sirupsen/logrus" +) + +func getHostProvider() (network.Provider, bool) { + return nil, false +} + +func getFallback() (network.Provider, string) { + logrus.Warn("using null network as the default") + return network.NewNoneProvider(), "" +} diff --git a/vendor/github.com/moby/buildkit/util/pull/pull.go b/vendor/github.com/moby/buildkit/util/pull/pull.go new file mode 100644 index 0000000000000..003824027bd27 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/pull/pull.go @@ -0,0 +1,274 @@ +package pull + +import ( + "context" + "sync" + + "github.com/containerd/containerd/content" + 
"github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/reference" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/containerd/remotes/docker/schema1" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/util/progress/logs" + "github.com/moby/buildkit/util/pull/pullprogress" + "github.com/moby/buildkit/util/resolver" + "github.com/moby/buildkit/util/resolver/limited" + "github.com/moby/buildkit/util/resolver/retryhandler" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type Puller struct { + ContentStore content.Store + Resolver *resolver.Resolver + Src reference.Spec + Platform ocispecs.Platform + + g flightcontrol.Group + resolveErr error + resolveDone bool + desc ocispecs.Descriptor + configDesc ocispecs.Descriptor + ref string + layers []ocispecs.Descriptor + nonlayers []ocispecs.Descriptor +} + +var _ content.Provider = &provider{} + +type PulledManifests struct { + Ref string + MainManifestDesc ocispecs.Descriptor + ConfigDesc ocispecs.Descriptor + Nonlayers []ocispecs.Descriptor + Descriptors []ocispecs.Descriptor + Provider func(session.Group) content.Provider +} + +func (p *Puller) resolve(ctx context.Context, resolver remotes.Resolver) error { + _, err := p.g.Do(ctx, "", func(ctx context.Context) (_ interface{}, err error) { + if p.resolveErr != nil || p.resolveDone { + return nil, p.resolveErr + } + defer func() { + if !errors.Is(err, context.Canceled) { + p.resolveErr = err + } + }() + if p.tryLocalResolve(ctx) == nil { + return + } + ref, desc, err := resolver.Resolve(ctx, p.Src.String()) + if err != nil { + return nil, err + } + p.desc = desc + p.ref = ref + p.resolveDone = 
true + return nil, nil + }) + return err +} + +func (p *Puller) tryLocalResolve(ctx context.Context) error { + desc := ocispecs.Descriptor{ + Digest: p.Src.Digest(), + } + + if desc.Digest == "" { + return errors.New("empty digest") + } + + info, err := p.ContentStore.Info(ctx, desc.Digest) + if err != nil { + return err + } + desc.Size = info.Size + p.ref = p.Src.String() + ra, err := p.ContentStore.ReaderAt(ctx, desc) + if err != nil { + return err + } + mt, err := imageutil.DetectManifestMediaType(ra) + if err != nil { + return err + } + desc.MediaType = mt + p.desc = desc + return nil +} + +func (p *Puller) PullManifests(ctx context.Context) (*PulledManifests, error) { + err := p.resolve(ctx, p.Resolver) + if err != nil { + return nil, err + } + + platform := platforms.Only(p.Platform) + + var mu sync.Mutex // images.Dispatch calls handlers in parallel + metadata := make(map[digest.Digest]ocispecs.Descriptor) + + // TODO: need a wrapper snapshot interface that combines content + // and snapshots as 1) buildkit shouldn't have a dependency on contentstore + // or 2) cachemanager should manage the contentstore + var handlers []images.Handler + + fetcher, err := p.Resolver.Fetcher(ctx, p.ref) + if err != nil { + return nil, err + } + + var schema1Converter *schema1.Converter + if p.desc.MediaType == images.MediaTypeDockerSchema1Manifest { + // schema1 images are not lazy at this time, the converter will pull the whole image + // including layer blobs + schema1Converter = schema1.NewConverter(p.ContentStore, &pullprogress.FetcherWithProgress{ + Fetcher: fetcher, + Manager: p.ContentStore, + }) + handlers = append(handlers, schema1Converter) + } else { + // Get all the children for a descriptor + childrenHandler := images.ChildrenHandler(p.ContentStore) + // Filter the children by the platform + childrenHandler = images.FilterPlatforms(childrenHandler, platform) + // Limit manifests pulled to the best match in an index + childrenHandler = 
images.LimitManifests(childrenHandler, platform, 1) + + dslHandler, err := docker.AppendDistributionSourceLabel(p.ContentStore, p.ref) + if err != nil { + return nil, err + } + handlers = append(handlers, + filterLayerBlobs(metadata, &mu), + retryhandler.New(limited.FetchHandler(p.ContentStore, fetcher, p.ref), logs.LoggerFromContext(ctx)), + childrenHandler, + dslHandler, + ) + } + + if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, p.desc); err != nil { + return nil, err + } + + if schema1Converter != nil { + p.desc, err = schema1Converter.Convert(ctx) + if err != nil { + return nil, err + } + + // this just gathers metadata about the converted descriptors making up the image, does + // not fetch anything + if err := images.Dispatch(ctx, images.Handlers( + filterLayerBlobs(metadata, &mu), + images.FilterPlatforms(images.ChildrenHandler(p.ContentStore), platform), + ), nil, p.desc); err != nil { + return nil, err + } + } + + for _, desc := range metadata { + p.nonlayers = append(p.nonlayers, desc) + switch desc.MediaType { + case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig: + p.configDesc = desc + } + } + + // split all pulled data to layers and rest. layers remain roots and are deleted with snapshots. rest will be linked to layers. 
+ p.layers, err = getLayers(ctx, p.ContentStore, p.desc, platform) + if err != nil { + return nil, err + } + + return &PulledManifests{ + Ref: p.ref, + MainManifestDesc: p.desc, + ConfigDesc: p.configDesc, + Nonlayers: p.nonlayers, + Descriptors: p.layers, + Provider: func(g session.Group) content.Provider { + return &provider{puller: p, resolver: p.Resolver.WithSession(g)} + }, + }, nil +} + +type provider struct { + puller *Puller + resolver remotes.Resolver +} + +func (p *provider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { + err := p.puller.resolve(ctx, p.resolver) + if err != nil { + return nil, err + } + + fetcher, err := p.resolver.Fetcher(ctx, p.puller.ref) + if err != nil { + return nil, err + } + + return contentutil.FromFetcher(fetcher).ReaderAt(ctx, desc) +} + +// filterLayerBlobs causes layer blobs to be skipped for fetch, which is required to support lazy blobs. +// It also stores the non-layer blobs (metadata) it encounters in the provided map. 
+func filterLayerBlobs(metadata map[digest.Digest]ocispecs.Descriptor, mu sync.Locker) images.HandlerFunc { + return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + switch desc.MediaType { + case + ocispecs.MediaTypeImageLayer, + ocispecs.MediaTypeImageLayerNonDistributable, + images.MediaTypeDockerSchema2Layer, + images.MediaTypeDockerSchema2LayerForeign, + ocispecs.MediaTypeImageLayerGzip, + images.MediaTypeDockerSchema2LayerGzip, + ocispecs.MediaTypeImageLayerNonDistributableGzip, + images.MediaTypeDockerSchema2LayerForeignGzip, + ocispecs.MediaTypeImageLayerZstd, + ocispecs.MediaTypeImageLayerNonDistributableZstd: + return nil, images.ErrSkipDesc + default: + if metadata != nil { + mu.Lock() + metadata[desc.Digest] = desc + mu.Unlock() + } + } + return nil, nil + } +} + +func getLayers(ctx context.Context, provider content.Provider, desc ocispecs.Descriptor, platform platforms.MatchComparer) ([]ocispecs.Descriptor, error) { + manifest, err := images.Manifest(ctx, provider, desc, platform) + if err != nil { + return nil, errors.WithStack(err) + } + image := images.Image{Target: desc} + diffIDs, err := image.RootFS(ctx, provider, platform) + if err != nil { + return nil, errors.Wrap(err, "failed to resolve rootfs") + } + if len(diffIDs) != len(manifest.Layers) { + return nil, errors.Errorf("mismatched image rootfs and manifest layers %+v %+v", diffIDs, manifest.Layers) + } + layers := make([]ocispecs.Descriptor, len(diffIDs)) + for i := range diffIDs { + desc := manifest.Layers[i] + if desc.Annotations == nil { + desc.Annotations = map[string]string{} + } + desc.Annotations["containerd.io/uncompressed"] = diffIDs[i].String() + layers[i] = desc + } + return layers, nil +} diff --git a/vendor/github.com/moby/buildkit/worker/base/worker.go b/vendor/github.com/moby/buildkit/worker/base/worker.go new file mode 100644 index 0000000000000..fa8b7692d9fdf --- /dev/null +++ b/vendor/github.com/moby/buildkit/worker/base/worker.go @@ 
-0,0 +1,481 @@ +package base + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/remotes/docker" + "github.com/docker/docker/pkg/idtools" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/client/llb" + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/exporter" + imageexporter "github.com/moby/buildkit/exporter/containerimage" + localexporter "github.com/moby/buildkit/exporter/local" + ociexporter "github.com/moby/buildkit/exporter/oci" + tarexporter "github.com/moby/buildkit/exporter/tar" + "github.com/moby/buildkit/frontend" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/snapshot/imagerefchecker" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver/mounts" + "github.com/moby/buildkit/solver/llbsolver/ops" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/source" + "github.com/moby/buildkit/source/containerimage" + "github.com/moby/buildkit/source/git" + "github.com/moby/buildkit/source/http" + "github.com/moby/buildkit/source/local" + "github.com/moby/buildkit/util/archutil" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/progress/controller" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +const labelCreatedAt = "buildkit/createdat" + +// TODO: this file should be removed. 
containerd defines ContainerdWorker, oci defines OCIWorker. There is no base worker. + +// WorkerOpt is specific to a worker. +// See also CommonOpt. +type WorkerOpt struct { + ID string + Labels map[string]string + Platforms []ocispecs.Platform + GCPolicy []client.PruneInfo + Executor executor.Executor + Snapshotter snapshot.Snapshotter + ContentStore content.Store + Applier diff.Applier + Differ diff.Comparer + ImageStore images.Store // optional + RegistryHosts docker.RegistryHosts + IdentityMapping *idtools.IdentityMapping + LeaseManager leases.Manager + GarbageCollect func(context.Context) (gc.Stats, error) + ParallelismSem *semaphore.Weighted + MetadataStore *metadata.Store + MountPoolRoot string +} + +// Worker is a local worker instance with dedicated snapshotter, cache, and so on. +// TODO: s/Worker/OpWorker/g ? +type Worker struct { + WorkerOpt + CacheMgr cache.Manager + SourceManager *source.Manager + imageWriter *imageexporter.ImageWriter + ImageSource *containerimage.Source +} + +// NewWorker instantiates a local worker +func NewWorker(ctx context.Context, opt WorkerOpt) (*Worker, error) { + imageRefChecker := imagerefchecker.New(imagerefchecker.Opt{ + ImageStore: opt.ImageStore, + ContentStore: opt.ContentStore, + }) + + cm, err := cache.NewManager(cache.ManagerOpt{ + Snapshotter: opt.Snapshotter, + PruneRefChecker: imageRefChecker, + Applier: opt.Applier, + GarbageCollect: opt.GarbageCollect, + LeaseManager: opt.LeaseManager, + ContentStore: opt.ContentStore, + Differ: opt.Differ, + MetadataStore: opt.MetadataStore, + MountPoolRoot: opt.MountPoolRoot, + }) + if err != nil { + return nil, err + } + + sm, err := source.NewManager() + if err != nil { + return nil, err + } + + is, err := containerimage.NewSource(containerimage.SourceOpt{ + Snapshotter: opt.Snapshotter, + ContentStore: opt.ContentStore, + Applier: opt.Applier, + ImageStore: opt.ImageStore, + CacheAccessor: cm, + RegistryHosts: opt.RegistryHosts, + LeaseManager: opt.LeaseManager, + }) + if 
err != nil { + return nil, err + } + + sm.Register(is) + + if err := git.Supported(); err == nil { + gs, err := git.NewSource(git.Opt{ + CacheAccessor: cm, + }) + if err != nil { + return nil, err + } + sm.Register(gs) + } else { + bklog.G(ctx).Warnf("git source cannot be enabled: %v", err) + } + + hs, err := http.NewSource(http.Opt{ + CacheAccessor: cm, + }) + if err != nil { + return nil, err + } + + sm.Register(hs) + + ss, err := local.NewSource(local.Opt{ + CacheAccessor: cm, + }) + if err != nil { + return nil, err + } + sm.Register(ss) + + iw, err := imageexporter.NewImageWriter(imageexporter.WriterOpt{ + Snapshotter: opt.Snapshotter, + ContentStore: opt.ContentStore, + Applier: opt.Applier, + Differ: opt.Differ, + }) + if err != nil { + return nil, err + } + + leases, err := opt.LeaseManager.List(ctx, "labels.\"buildkit/lease.temporary\"") + if err != nil { + return nil, err + } + for _, l := range leases { + opt.LeaseManager.Delete(ctx, l) + } + + return &Worker{ + WorkerOpt: opt, + CacheMgr: cm, + SourceManager: sm, + imageWriter: iw, + ImageSource: is, + }, nil +} + +func (w *Worker) ContentStore() content.Store { + return w.WorkerOpt.ContentStore +} + +func (w *Worker) ID() string { + return w.WorkerOpt.ID +} + +func (w *Worker) Labels() map[string]string { + return w.WorkerOpt.Labels +} + +func (w *Worker) Platforms(noCache bool) []ocispecs.Platform { + if noCache { + for _, p := range archutil.SupportedPlatforms(noCache) { + exists := false + for _, pp := range w.WorkerOpt.Platforms { + if platforms.Only(pp).Match(p) { + exists = true + break + } + } + if !exists { + w.WorkerOpt.Platforms = append(w.WorkerOpt.Platforms, p) + } + } + } + return w.WorkerOpt.Platforms +} + +func (w *Worker) GCPolicy() []client.PruneInfo { + return w.WorkerOpt.GCPolicy +} + +func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) { + var opts []cache.RefOption + if hidden { + opts = append(opts, cache.NoUpdateLastUsed) + } + if id 
== "" { + // results can have nil refs if they are optimized out to be equal to scratch, + // i.e. Diff(A,A) == scratch + return nil, nil + } + + var pg progress.Controller + optGetter := solver.CacheOptGetterOf(ctx) + if optGetter != nil { + if kv := optGetter(false, cache.ProgressKey{}); kv != nil { + if v, ok := kv[cache.ProgressKey{}].(progress.Controller); ok { + pg = v + } + } + } + + ref, err := w.CacheMgr.Get(ctx, id, pg, opts...) + var needsRemoteProviders cache.NeedsRemoteProviderError + if errors.As(err, &needsRemoteProviders) { + if optGetter != nil { + var keys []interface{} + for _, dgst := range needsRemoteProviders { + keys = append(keys, cache.DescHandlerKey(dgst)) + } + descHandlers := cache.DescHandlers(make(map[digest.Digest]*cache.DescHandler)) + for k, v := range optGetter(true, keys...) { + if key, ok := k.(cache.DescHandlerKey); ok { + if handler, ok := v.(*cache.DescHandler); ok { + descHandlers[digest.Digest(key)] = handler + } + } + } + opts = append(opts, descHandlers) + ref, err = w.CacheMgr.Get(ctx, id, pg, opts...) 
+ } + } + if err != nil { + return nil, errors.Wrap(err, "failed to load ref") + } + return ref, nil +} + +func (w *Worker) Executor() executor.Executor { + return w.WorkerOpt.Executor +} + +func (w *Worker) CacheManager() cache.Manager { + return w.CacheMgr +} + +func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *session.Manager) (solver.Op, error) { + if baseOp, ok := v.Sys().(*pb.Op); ok { + switch op := baseOp.Op.(type) { + case *pb.Op_Source: + return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, w.ParallelismSem, sm, w) + case *pb.Op_Exec: + return ops.NewExecOp(v, op, baseOp.Platform, w.CacheMgr, w.ParallelismSem, sm, w.WorkerOpt.Executor, w) + case *pb.Op_File: + return ops.NewFileOp(v, op, w.CacheMgr, w.ParallelismSem, w) + case *pb.Op_Build: + return ops.NewBuildOp(v, op, s, w) + case *pb.Op_Merge: + return ops.NewMergeOp(v, op, w) + case *pb.Op_Diff: + return ops.NewDiffOp(v, op, w) + default: + return nil, errors.Errorf("no support for %T", op) + } + } + return nil, errors.Errorf("could not resolve %v", v) +} + +func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error { + mu := mounts.CacheMountsLocker() + mu.Lock() + defer mu.Unlock() + + for _, id := range ids { + mds, err := mounts.SearchCacheDir(ctx, w.CacheMgr, id) + if err != nil { + return err + } + for _, md := range mds { + if err := md.SetCachePolicyDefault(); err != nil { + return err + } + if err := md.ClearCacheDirIndex(); err != nil { + return err + } + // if ref is unused try to clean it up right away by releasing it + if mref, err := w.CacheMgr.GetMutable(ctx, md.ID()); err == nil { + go mref.Release(context.TODO()) + } + } + } + + mounts.ClearActiveCacheMounts() + return nil +} + +func (w *Worker) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt, sm *session.Manager, g session.Group) (digest.Digest, []byte, error) { + return w.ImageSource.ResolveImageConfig(ctx, ref, opt, sm, g) +} + +func (w 
*Worker) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) ([]*client.UsageInfo, error) { + return w.CacheMgr.DiskUsage(ctx, opt) +} + +func (w *Worker) Prune(ctx context.Context, ch chan client.UsageInfo, opt ...client.PruneInfo) error { + return w.CacheMgr.Prune(ctx, ch, opt...) +} + +func (w *Worker) Exporter(name string, sm *session.Manager) (exporter.Exporter, error) { + switch name { + case client.ExporterImage: + return imageexporter.New(imageexporter.Opt{ + Images: w.ImageStore, + SessionManager: sm, + ImageWriter: w.imageWriter, + RegistryHosts: w.RegistryHosts, + LeaseManager: w.LeaseManager, + }) + case client.ExporterLocal: + return localexporter.New(localexporter.Opt{ + SessionManager: sm, + }) + case client.ExporterTar: + return tarexporter.New(tarexporter.Opt{ + SessionManager: sm, + }) + case client.ExporterOCI: + return ociexporter.New(ociexporter.Opt{ + SessionManager: sm, + ImageWriter: w.imageWriter, + Variant: ociexporter.VariantOCI, + LeaseManager: w.LeaseManager, + }) + case client.ExporterDocker: + return ociexporter.New(ociexporter.Opt{ + SessionManager: sm, + ImageWriter: w.imageWriter, + Variant: ociexporter.VariantDocker, + LeaseManager: w.LeaseManager, + }) + default: + return nil, errors.Errorf("exporter %q could not be found", name) + } +} + +func (w *Worker) FromRemote(ctx context.Context, remote *solver.Remote) (ref cache.ImmutableRef, err error) { + if cd, ok := remote.Provider.(interface { + CheckDescriptor(context.Context, ocispecs.Descriptor) error + }); ok && len(remote.Descriptors) > 0 { + var eg errgroup.Group + for _, desc := range remote.Descriptors { + desc := desc + eg.Go(func() error { + if err := cd.CheckDescriptor(ctx, desc); err != nil { + return err + } + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, err + } + } + + var pg progress.Controller + optGetter := solver.CacheOptGetterOf(ctx) + if optGetter != nil { + if kv := optGetter(false, cache.ProgressKey{}); kv != nil { + if v, ok := 
kv[cache.ProgressKey{}].(progress.Controller); ok { + pg = v + } + } + } + if pg == nil { + pg = &controller.Controller{ + WriterFactory: progress.FromContext(ctx), + } + } + + descHandler := &cache.DescHandler{ + Provider: func(session.Group) content.Provider { return remote.Provider }, + Progress: pg, + } + snapshotLabels := func([]ocispecs.Descriptor, int) map[string]string { return nil } + if cd, ok := remote.Provider.(interface { + SnapshotLabels([]ocispecs.Descriptor, int) map[string]string + }); ok { + snapshotLabels = cd.SnapshotLabels + } + descHandlers := cache.DescHandlers(make(map[digest.Digest]*cache.DescHandler)) + for i, desc := range remote.Descriptors { + descHandlers[desc.Digest] = &cache.DescHandler{ + Provider: descHandler.Provider, + Progress: descHandler.Progress, + Annotations: desc.Annotations, + SnapshotLabels: snapshotLabels(remote.Descriptors, i), + } + } + + var current cache.ImmutableRef + for i, desc := range remote.Descriptors { + tm := time.Now() + if tmstr, ok := desc.Annotations[labelCreatedAt]; ok { + if err := (&tm).UnmarshalText([]byte(tmstr)); err != nil { + if current != nil { + current.Release(context.TODO()) + } + return nil, err + } + } + descr := fmt.Sprintf("imported %s", remote.Descriptors[i].Digest) + if v, ok := desc.Annotations["buildkit/description"]; ok { + descr = v + } + opts := []cache.RefOption{ + cache.WithDescription(descr), + cache.WithCreationTime(tm), + descHandlers, + } + if dh, ok := descHandlers[desc.Digest]; ok { + if ref, ok := dh.Annotations["containerd.io/distribution.source.ref"]; ok { + opts = append(opts, cache.WithImageRef(ref)) // can set by registry cache importer + } + } + ref, err := w.CacheMgr.GetByBlob(ctx, desc, current, opts...) + if current != nil { + current.Release(context.TODO()) + } + if err != nil { + return nil, err + } + current = ref + } + return current, nil +} + +// ID reads the worker id from the `workerid` file. 
+// If not exist, it creates a random one, +func ID(root string) (string, error) { + f := filepath.Join(root, "workerid") + b, err := ioutil.ReadFile(f) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + id := identity.NewID() + err := ioutil.WriteFile(f, []byte(id), 0400) + return id, err + } + return "", err + } + return string(b), nil +} diff --git a/vendor/github.com/moby/buildkit/worker/containerd/containerd.go b/vendor/github.com/moby/buildkit/worker/containerd/containerd.go new file mode 100644 index 0000000000000..c671c99e3c990 --- /dev/null +++ b/vendor/github.com/moby/buildkit/worker/containerd/containerd.go @@ -0,0 +1,150 @@ +package containerd + +import ( + "context" + "os" + "path/filepath" + "strings" + + "github.com/containerd/containerd" + "github.com/containerd/containerd/gc" + "github.com/containerd/containerd/leases" + gogoptypes "github.com/gogo/protobuf/types" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/executor/containerdexecutor" + "github.com/moby/buildkit/executor/oci" + containerdsnapshot "github.com/moby/buildkit/snapshot/containerd" + "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/network/netproviders" + "github.com/moby/buildkit/util/winlayers" + "github.com/moby/buildkit/worker" + "github.com/moby/buildkit/worker/base" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/semaphore" +) + +// NewWorkerOpt creates a WorkerOpt. +func NewWorkerOpt(root string, address, snapshotterName, ns string, rootless bool, labels map[string]string, dns *oci.DNSConfig, nopt netproviders.Opt, apparmorProfile string, parallelismSem *semaphore.Weighted, traceSocket string, opts ...containerd.ClientOpt) (base.WorkerOpt, error) { + opts = append(opts, containerd.WithDefaultNamespace(ns)) + client, err := containerd.New(address, opts...) 
+ if err != nil { + return base.WorkerOpt{}, errors.Wrapf(err, "failed to connect client to %q . make sure containerd is running", address) + } + return newContainerd(root, client, snapshotterName, ns, rootless, labels, dns, nopt, apparmorProfile, parallelismSem, traceSocket) +} + +func newContainerd(root string, client *containerd.Client, snapshotterName, ns string, rootless bool, labels map[string]string, dns *oci.DNSConfig, nopt netproviders.Opt, apparmorProfile string, parallelismSem *semaphore.Weighted, traceSocket string) (base.WorkerOpt, error) { + if strings.Contains(snapshotterName, "/") { + return base.WorkerOpt{}, errors.Errorf("bad snapshotter name: %q", snapshotterName) + } + name := "containerd-" + snapshotterName + root = filepath.Join(root, name) + if err := os.MkdirAll(root, 0700); err != nil { + return base.WorkerOpt{}, errors.Wrapf(err, "failed to create %s", root) + } + + df := client.DiffService() + // TODO: should use containerd daemon instance ID (containerd/containerd#1862)? 
+ id, err := base.ID(root) + if err != nil { + return base.WorkerOpt{}, err + } + + serverInfo, err := client.IntrospectionService().Server(context.TODO(), &gogoptypes.Empty{}) + if err != nil { + return base.WorkerOpt{}, err + } + + np, npResolvedMode, err := netproviders.Providers(nopt) + if err != nil { + return base.WorkerOpt{}, err + } + + hostname, err := os.Hostname() + if err != nil { + hostname = "unknown" + } + xlabels := map[string]string{ + worker.LabelExecutor: "containerd", + worker.LabelSnapshotter: snapshotterName, + worker.LabelHostname: hostname, + worker.LabelNetwork: npResolvedMode, + } + if apparmorProfile != "" { + xlabels[worker.LabelApparmorProfile] = apparmorProfile + } + xlabels[worker.LabelContainerdNamespace] = ns + xlabels[worker.LabelContainerdUUID] = serverInfo.UUID + for k, v := range labels { + xlabels[k] = v + } + + lm := leaseutil.WithNamespace(client.LeasesService(), ns) + + gc := func(ctx context.Context) (gc.Stats, error) { + l, err := lm.Create(ctx) + if err != nil { + return nil, nil + } + return nil, lm.Delete(ctx, leases.Lease{ID: l.ID}, leases.SynchronousDelete) + } + + cs := containerdsnapshot.NewContentStore(client.ContentStore(), ns) + + resp, err := client.IntrospectionService().Plugins(context.TODO(), []string{"type==io.containerd.runtime.v1", "type==io.containerd.runtime.v2"}) + if err != nil { + return base.WorkerOpt{}, errors.Wrap(err, "failed to list runtime plugin") + } + if len(resp.Plugins) == 0 { + return base.WorkerOpt{}, errors.New("failed to find any runtime plugins") + } + + var platforms []ocispecs.Platform + for _, plugin := range resp.Plugins { + for _, p := range plugin.Platforms { + platforms = append(platforms, ocispecs.Platform{ + OS: p.OS, + Architecture: p.Architecture, + Variant: p.Variant, + }) + } + } + + snap := containerdsnapshot.NewSnapshotter(snapshotterName, client.SnapshotService(snapshotterName), ns, nil) + + if err := cache.MigrateV2( + context.TODO(), + filepath.Join(root, 
"metadata.db"), + filepath.Join(root, "metadata_v2.db"), + cs, + snap, + lm, + ); err != nil { + return base.WorkerOpt{}, err + } + + md, err := metadata.NewStore(filepath.Join(root, "metadata_v2.db")) + if err != nil { + return base.WorkerOpt{}, err + } + + opt := base.WorkerOpt{ + ID: id, + Labels: xlabels, + MetadataStore: md, + Executor: containerdexecutor.New(client, root, "", np, dns, apparmorProfile, traceSocket, rootless), + Snapshotter: snap, + ContentStore: cs, + Applier: winlayers.NewFileSystemApplierWithWindows(cs, df), + Differ: winlayers.NewWalkingDiffWithWindows(cs, df), + ImageStore: client.ImageService(), + Platforms: platforms, + LeaseManager: lm, + GarbageCollect: gc, + ParallelismSem: parallelismSem, + MountPoolRoot: filepath.Join(root, "cachemounts"), + } + return opt, nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 674b0bc5e9504..900babefe8517 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -32,6 +32,8 @@ github.com/Microsoft/go-winio/vhd github.com/Microsoft/hcsshim github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options github.com/Microsoft/hcsshim/computestorage +github.com/Microsoft/hcsshim/hcn +github.com/Microsoft/hcsshim/internal/cni github.com/Microsoft/hcsshim/internal/cow github.com/Microsoft/hcsshim/internal/hcs github.com/Microsoft/hcsshim/internal/hcs/schema1 @@ -46,6 +48,8 @@ github.com/Microsoft/hcsshim/internal/longpath github.com/Microsoft/hcsshim/internal/mergemaps github.com/Microsoft/hcsshim/internal/oc github.com/Microsoft/hcsshim/internal/queue +github.com/Microsoft/hcsshim/internal/regstate +github.com/Microsoft/hcsshim/internal/runhcs github.com/Microsoft/hcsshim/internal/safefile github.com/Microsoft/hcsshim/internal/timeout github.com/Microsoft/hcsshim/internal/vmcompute @@ -241,6 +245,9 @@ github.com/containerd/continuity/sysx # github.com/containerd/fifo v1.0.0 ## explicit; go 1.13 github.com/containerd/fifo +# github.com/containerd/go-cni v1.1.6 +## explicit; go 1.17 
+github.com/containerd/go-cni # github.com/containerd/go-runc v1.0.0 ## explicit; go 1.13 github.com/containerd/go-runc @@ -257,6 +264,18 @@ github.com/containerd/ttrpc # github.com/containerd/typeurl v1.0.2 ## explicit; go 1.13 github.com/containerd/typeurl +# github.com/containernetworking/cni v1.1.1 +## explicit; go 1.14 +github.com/containernetworking/cni/libcni +github.com/containernetworking/cni/pkg/invoke +github.com/containernetworking/cni/pkg/types +github.com/containernetworking/cni/pkg/types/020 +github.com/containernetworking/cni/pkg/types/040 +github.com/containernetworking/cni/pkg/types/100 +github.com/containernetworking/cni/pkg/types/create +github.com/containernetworking/cni/pkg/types/internal +github.com/containernetworking/cni/pkg/utils +github.com/containernetworking/cni/pkg/version # github.com/coreos/go-systemd/v22 v22.3.2 ## explicit; go 1.12 github.com/coreos/go-systemd/v22/activation @@ -497,11 +516,14 @@ github.com/moby/buildkit/client/ociindex github.com/moby/buildkit/control github.com/moby/buildkit/control/gateway github.com/moby/buildkit/executor +github.com/moby/buildkit/executor/containerdexecutor github.com/moby/buildkit/executor/oci github.com/moby/buildkit/executor/runcexecutor github.com/moby/buildkit/exporter +github.com/moby/buildkit/exporter/containerimage github.com/moby/buildkit/exporter/containerimage/exptypes github.com/moby/buildkit/exporter/local +github.com/moby/buildkit/exporter/oci github.com/moby/buildkit/exporter/tar github.com/moby/buildkit/frontend github.com/moby/buildkit/frontend/dockerfile/builder @@ -528,6 +550,7 @@ github.com/moby/buildkit/session/sshforward github.com/moby/buildkit/session/upload github.com/moby/buildkit/snapshot github.com/moby/buildkit/snapshot/containerd +github.com/moby/buildkit/snapshot/imagerefchecker github.com/moby/buildkit/solver github.com/moby/buildkit/solver/bboltcachestorage github.com/moby/buildkit/solver/errdefs @@ -540,6 +563,7 @@ github.com/moby/buildkit/solver/llbsolver/ops 
github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes github.com/moby/buildkit/solver/pb github.com/moby/buildkit/source +github.com/moby/buildkit/source/containerimage github.com/moby/buildkit/source/git github.com/moby/buildkit/source/http github.com/moby/buildkit/source/local @@ -563,10 +587,13 @@ github.com/moby/buildkit/util/grpcerrors github.com/moby/buildkit/util/imageutil github.com/moby/buildkit/util/leaseutil github.com/moby/buildkit/util/network +github.com/moby/buildkit/util/network/cniprovider +github.com/moby/buildkit/util/network/netproviders github.com/moby/buildkit/util/overlay github.com/moby/buildkit/util/progress github.com/moby/buildkit/util/progress/controller github.com/moby/buildkit/util/progress/logs +github.com/moby/buildkit/util/pull github.com/moby/buildkit/util/pull/pullprogress github.com/moby/buildkit/util/push github.com/moby/buildkit/util/resolver @@ -587,6 +614,8 @@ github.com/moby/buildkit/util/urlutil github.com/moby/buildkit/util/winlayers github.com/moby/buildkit/version github.com/moby/buildkit/worker +github.com/moby/buildkit/worker/base +github.com/moby/buildkit/worker/containerd # github.com/moby/ipvs v1.0.2 ## explicit; go 1.13 github.com/moby/ipvs From 523bc1a871472a1177890c2ca07237955691aeca Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Mon, 1 Aug 2022 14:38:45 +0200 Subject: [PATCH 19/90] Don't look at graphdriver data if using containerd Signed-off-by: Djordje Lukic --- daemon/inspect.go | 30 ++++++++++++++++-------------- 1 file changed, 16 insertions(+), 14 deletions(-) diff --git a/daemon/inspect.go b/daemon/inspect.go index c873cf315e2c1..bf1c083fa820d 100644 --- a/daemon/inspect.go +++ b/daemon/inspect.go @@ -188,23 +188,25 @@ func (daemon *Daemon) getInspectData(container *container.Container) (*types.Con contJSONBase.GraphDriver.Name = container.Driver - if container.RWLayer == nil { - if container.Dead { - return contJSONBase, nil + if !daemon.UsesSnapshotter() { + if container.RWLayer == nil { + if 
container.Dead { + return contJSONBase, nil + } + return nil, errdefs.System(errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")) } - return nil, errdefs.System(errors.New("RWLayer of container " + container.ID + " is unexpectedly nil")) - } - graphDriverData, err := container.RWLayer.Metadata() - // If container is marked as Dead, the container's graphdriver metadata - // could have been removed, it will cause error if we try to get the metadata, - // we can ignore the error if the container is dead. - if err != nil { - if !container.Dead { - return nil, errdefs.System(err) + graphDriverData, err := container.RWLayer.Metadata() + // If container is marked as Dead, the container's graphdriver metadata + // could have been removed, it will cause error if we try to get the metadata, + // we can ignore the error if the container is dead. + if err != nil { + if !container.Dead { + return nil, errdefs.System(err) + } + } else { + contJSONBase.GraphDriver.Data = graphDriverData } - } else { - contJSONBase.GraphDriver.Data = graphDriverData } return contJSONBase, nil From e118033dd748a41e98953f07e19303aa1128ce02 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Mon, 1 Aug 2022 14:40:09 +0200 Subject: [PATCH 20/90] Only use the image exporter in build if we don't use containerd Without this "docker build" fails with: Error response from daemon: exporter "image" could not be found Signed-off-by: Djordje Lukic --- builder/builder-next/builder.go | 29 ++++++++++++++++++++++------- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/builder/builder-next/builder.go b/builder/builder-next/builder.go index 631befebc4b98..49796ecbc1b14 100644 --- a/builder/builder-next/builder.go +++ b/builder/builder-next/builder.go @@ -87,8 +87,9 @@ type Builder struct { controller *mobycontrol.Controller reqBodyHandler *reqBodyHandler - mu sync.Mutex - jobs map[string]*buildJob + mu sync.Mutex + jobs map[string]*buildJob + useSnapshotter bool } // New creates 
a new builder @@ -103,6 +104,7 @@ func New(opt Opt) (*Builder, error) { controller: c, reqBodyHandler: reqHandler, jobs: map[string]*buildJob{}, + useSnapshotter: opt.UseSnapshotter, } return b, nil } @@ -340,14 +342,23 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder. names = append(names, tag.String()) } - exporterName := client.ExporterImage - exporterAttrs := map[string]string{ - "image.name": strings.Join(names, ","), - "name": strings.Join(names, ","), + exporterName := "" + exporterAttrs := map[string]string{} + + if b.useSnapshotter { + exporterName = client.ExporterImage + exporterAttrs = map[string]string{ + "image.name": strings.Join(names, ","), + "name": strings.Join(names, ","), + } } if len(opt.Options.Outputs) > 1 { return nil, errors.Errorf("multiple outputs not supported") + } else if len(opt.Options.Outputs) == 0 { + if !b.useSnapshotter { + exporterName = "moby" + } } else if len(opt.Options.Outputs) == 1 { // cacheonly is a special type for triggering skipping all exporters if opt.Options.Outputs[0].Type != "cacheonly" { @@ -355,7 +366,11 @@ func (b *Builder) Build(ctx context.Context, opt backend.BuildConfig) (*builder. 
exporterAttrs = opt.Options.Outputs[0].Attrs } } - + if !b.useSnapshotter && exporterName == "moby" { + if len(opt.Options.Tags) > 0 { + exporterAttrs["name"] = strings.Join(opt.Options.Tags, ",") + } + } cache := controlapi.CacheOptions{} if inlineCache := opt.Options.BuildArgs["BUILDKIT_INLINE_CACHE"]; inlineCache != nil { From 925f4f1a979e1d143a9b4da167747b5be80aa0e9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Fri, 22 Jul 2022 19:02:08 +0200 Subject: [PATCH 21/90] hack/emptyfs: Produce newer image layout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The legacy v1 is not supported by the containerd import Signed-off-by: Paweł Gronowski --- hack/make/.ensure-emptyfs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/hack/make/.ensure-emptyfs b/hack/make/.ensure-emptyfs index 898cc22834d33..bdbf9a4c935b9 100644 --- a/hack/make/.ensure-emptyfs +++ b/hack/make/.ensure-emptyfs @@ -7,11 +7,14 @@ if ! 
docker image inspect emptyfs > /dev/null; then # and also https://github.com/docker/docker/issues/4242 dir="$DEST/emptyfs" uuid=511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 + mkdir -p "$dir/$uuid" ( - echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > "$dir/repositories" + echo -n '[{"Config":"11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d.json","RepoTags":["emptyfs:latest"],"Layers":["511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar"]}]' > "$dir/manifest.json" + echo -n '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > "$dir/repositories" + echo -n '{"architecture":"x86_64","comment":"Imported from -","container_config":{"Hostname":"","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2013-06-13T14:03:50.821769-07:00","docker_version":"0.4.0","history":[{"created":"2013-06-13T14:03:50.821769-07:00","comment":"Imported from -"}],"rootfs":{"type":"layers","diff_ids":["sha256:84ff92691f909a05b224e1c56abb4864f01b4f8e3c854e4bb4c7baf1d3f6d652"]}}' > "$dir/11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d.json" cd "$dir/$uuid" - echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' 
> json + echo -n '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json echo '1.0' > VERSION tar -cf layer.tar --files-from /dev/null ) From 576c4d7b39f3016778caf3a5a038aa282c72fd97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Thu, 28 Jul 2022 14:50:45 +0200 Subject: [PATCH 22/90] integration-cli: Correct emptyfs id under c8d MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- integration-cli/docker_cli_inspect_test.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go index 6554792e016fc..0fe9106585186 100644 --- a/integration-cli/docker_cli_inspect_test.go +++ b/integration-cli/docker_cli_inspect_test.go @@ -42,6 +42,12 @@ func (s *DockerCLIInspectSuite) TestInspectImage(c *testing.T) { // fails, fix the difference in the image serialization instead of // updating this hash. imageTestID := "sha256:11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d" + usesContainerdSnapshotter := false // TODO(vvoland): Check for feature flag + if usesContainerdSnapshotter { + // Under containerd ID of the image is the digest of the manifest list. 
+ imageTestID = "sha256:e43ca824363c5c56016f6ede3a9035afe0e9bd43333215e0b0bde6193969725d" + } + id := inspectField(c, imageTest, "Id") assert.Equal(c, id, imageTestID) From 7b41cef90fe1a86b60e8ee7f87aaf7b7977e0064 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Mon, 25 Jul 2022 14:49:41 +0200 Subject: [PATCH 23/90] containerd/load: Load all platforms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To make it possible to load emptyfs which is amd64 only Signed-off-by: Paweł Gronowski --- daemon/containerd/image_exporter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/containerd/image_exporter.go b/daemon/containerd/image_exporter.go index 3f99718f1017f..13e03af5076cf 100644 --- a/daemon/containerd/image_exporter.go +++ b/daemon/containerd/image_exporter.go @@ -37,7 +37,7 @@ func (i *ImageService) ExportImage(ctx context.Context, names []string, outStrea // complement of ExportImage. The input stream is an uncompressed tar // ball containing images and metadata. 
func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - platform := platforms.DefaultStrict() + platform := platforms.All imgs, err := i.client.Import(ctx, inTar, containerd.WithImportPlatform(platform)) if err != nil { From 53f123d8768e5323fa904c679f13f6ee3906a448 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Mon, 25 Jul 2022 14:49:41 +0200 Subject: [PATCH 24/90] containerd/save: Export only present platform manifests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/image_exporter.go | 104 ++++++++++++++++++++++------ 1 file changed, 83 insertions(+), 21 deletions(-) diff --git a/daemon/containerd/image_exporter.go b/daemon/containerd/image_exporter.go index 3f99718f1017f..cf27af02b3a1c 100644 --- a/daemon/containerd/image_exporter.go +++ b/daemon/containerd/image_exporter.go @@ -5,34 +5,17 @@ import ( "io" "github.com/containerd/containerd" + cerrdefs "github.com/containerd/containerd/errdefs" + containerdimages "github.com/containerd/containerd/images" "github.com/containerd/containerd/images/archive" + "github.com/containerd/containerd/images/converter" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" + "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// ExportImage exports a list of images to the given output stream. The -// exported images are archived into a tar when written to the output -// stream. All images with the given tag and all versions containing -// the same tag are exported. names is the set of tags to export, and -// outStream is the writer which the images are written to. 
-func (i *ImageService) ExportImage(ctx context.Context, names []string, outStream io.Writer) error { - opts := []archive.ExportOpt{ - archive.WithPlatform(platforms.Ordered(platforms.DefaultSpec())), - archive.WithSkipNonDistributableBlobs(), - } - is := i.client.ImageService() - for _, imageRef := range names { - named, err := reference.ParseDockerRef(imageRef) - if err != nil { - return err - } - opts = append(opts, archive.WithImage(is, named.String())) - } - return i.client.Export(ctx, outStream, opts...) -} - // LoadImage uploads a set of images into the repository. This is the // complement of ExportImage. The input stream is an uncompressed tar // ball containing images and metadata. @@ -64,3 +47,82 @@ func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outSt } return nil } + +// ExportImage exports a list of images to the given output stream. The +// exported images are archived into a tar when written to the output +// stream. All images with the given tag and all versions containing +// the same tag are exported. names is the set of tags to export, and +// outStream is the writer which the images are written to. +func (i *ImageService) ExportImage(ctx context.Context, names []string, outStream io.Writer) error { + opts := []archive.ExportOpt{ + archive.WithSkipNonDistributableBlobs(), + } + + for _, imageRef := range names { + var err error + opts, err = i.appendImageForExport(ctx, opts, imageRef) + if err != nil { + return err + } + } + + return i.client.Export(ctx, outStream, opts...) 
+} + +func (i *ImageService) appendImageForExport(ctx context.Context, opts []archive.ExportOpt, name string) ([]archive.ExportOpt, error) { + ref, err := reference.ParseDockerRef(name) + if err != nil { + return opts, err + } + + is := i.client.ImageService() + + img, err := is.Get(ctx, ref.String()) + if err != nil { + return opts, err + } + + store := i.client.ContentStore() + + if containerdimages.IsIndexType(img.Target.MediaType) { + children, err := containerdimages.Children(ctx, store, img.Target) + if err != nil { + return opts, err + } + + // Check which platform manifests we have blobs for. + missingPlatforms := []v1.Platform{} + presentPlatforms := []v1.Platform{} + for _, child := range children { + if containerdimages.IsManifestType(child.MediaType) { + _, err := store.ReaderAt(ctx, child) + if cerrdefs.IsNotFound(err) { + missingPlatforms = append(missingPlatforms, *child.Platform) + logrus.WithField("digest", child.Digest.String()).Debug("missing blob, not exporting") + continue + } else if err != nil { + return opts, err + } + presentPlatforms = append(presentPlatforms, *child.Platform) + } + } + + // If we have all the manifests, just export the original index. + if len(missingPlatforms) == 0 { + return append(opts, archive.WithImage(is, img.Name)), nil + } + + // Create a new manifest which contains only the manifests we have in store. 
+ srcRef := ref.String() + targetRef := srcRef + "-tmp-export" + newImg, err := converter.Convert(ctx, i.client, targetRef, srcRef, + converter.WithPlatform(platforms.Any(presentPlatforms...))) + if err != nil { + return opts, err + } + defer i.client.ImageService().Delete(ctx, newImg.Name, containerdimages.SynchronousDelete()) + return append(opts, archive.WithManifest(newImg.Target, srcRef)), nil + } + + return append(opts, archive.WithImage(is, img.Name)), nil +} From a03029bdfb1b193377df7b75b84ab92ff782f8a4 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Thu, 4 Aug 2022 10:37:04 +0200 Subject: [PATCH 25/90] Introduce support for docker commit Signed-off-by: Nicolas De Loof --- builder/builder.go | 2 +- builder/dockerfile/dispatchers.go | 4 +- builder/dockerfile/internals.go | 6 +- builder/dockerfile/mockbackend_test.go | 2 +- daemon/commit.go | 2 +- daemon/containerd/image.go | 4 +- daemon/containerd/image_commit.go | 301 ++++++++++++++++++++++++- daemon/containerd/service.go | 10 +- daemon/daemon.go | 2 +- daemon/image_service.go | 4 +- daemon/images/image_commit.go | 7 +- 11 files changed, 320 insertions(+), 24 deletions(-) diff --git a/builder/builder.go b/builder/builder.go index 2fc371da90246..f23de283e5273 100644 --- a/builder/builder.go +++ b/builder/builder.go @@ -42,7 +42,7 @@ type Backend interface { // CommitBuildStep creates a new Docker image from the config generated by // a build step. 
- CommitBuildStep(backend.CommitConfig) (image.ID, error) + CommitBuildStep(context.Context, backend.CommitConfig) (image.ID, error) // ContainerCreateWorkdir creates the workdir ContainerCreateWorkdir(containerID string) error diff --git a/builder/dockerfile/dispatchers.go b/builder/dockerfile/dispatchers.go index 78ae91683ffba..ac27a18105395 100644 --- a/builder/dockerfile/dispatchers.go +++ b/builder/dockerfile/dispatchers.go @@ -316,7 +316,7 @@ func dispatchWorkdir(ctx context.Context, d dispatchRequest, c *instructions.Wor return err } - return d.builder.commitContainer(d.state, containerID, runConfigWithCommentCmd) + return d.builder.commitContainer(ctx, d.state, containerID, runConfigWithCommentCmd) } // RUN some command yo @@ -390,7 +390,7 @@ func dispatchRun(ctx context.Context, d dispatchRequest, c *instructions.RunComm runConfigForCacheProbe.ArgsEscaped = stateRunConfig.ArgsEscaped } - return d.builder.commitContainer(d.state, cID, runConfigForCacheProbe) + return d.builder.commitContainer(ctx, d.state, cID, runConfigForCacheProbe) } // Derive the command to use for probeCache() and to commit in this container. 
diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go index 544b977f240f2..1baf63366dbc4 100644 --- a/builder/dockerfile/internals.go +++ b/builder/dockerfile/internals.go @@ -87,10 +87,10 @@ func (b *Builder) commit(ctx context.Context, dispatchState *dispatchState, comm return err } - return b.commitContainer(dispatchState, id, runConfigWithCommentCmd) + return b.commitContainer(ctx, dispatchState, id, runConfigWithCommentCmd) } -func (b *Builder) commitContainer(dispatchState *dispatchState, id string, containerConfig *container.Config) error { +func (b *Builder) commitContainer(ctx context.Context, dispatchState *dispatchState, id string, containerConfig *container.Config) error { if b.disableCommit { return nil } @@ -103,7 +103,7 @@ func (b *Builder) commitContainer(dispatchState *dispatchState, id string, conta ContainerID: id, } - imageID, err := b.docker.CommitBuildStep(commitCfg) + imageID, err := b.docker.CommitBuildStep(ctx, commitCfg) dispatchState.imageID = string(imageID) return err } diff --git a/builder/dockerfile/mockbackend_test.go b/builder/dockerfile/mockbackend_test.go index da77054521c9e..9f3ae759e2453 100644 --- a/builder/dockerfile/mockbackend_test.go +++ b/builder/dockerfile/mockbackend_test.go @@ -39,7 +39,7 @@ func (m *MockBackend) ContainerRm(name string, config *types.ContainerRmConfig) return nil } -func (m *MockBackend) CommitBuildStep(c backend.CommitConfig) (image.ID, error) { +func (m *MockBackend) CommitBuildStep(ctx context.Context, c backend.CommitConfig) (image.ID, error) { if m.commitFunc != nil { return m.commitFunc(c) } diff --git a/daemon/commit.go b/daemon/commit.go index fbb02469a33e4..45af6ac8d9797 100644 --- a/daemon/commit.go +++ b/daemon/commit.go @@ -155,7 +155,7 @@ func (daemon *Daemon) CreateImageFromContainer(ctx context.Context, name string, return "", err } - id, err := daemon.imageService.CommitImage(backend.CommitConfig{ + id, err := daemon.imageService.CommitImage(ctx, 
backend.CommitConfig{ Author: c.Author, Comment: c.Comment, Config: newConfig, diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index ee19399379391..bcb4fd1564de1 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -29,7 +29,7 @@ func (i *ImageService) GetContainerdImage(ctx context.Context, refOrID string, p // GetImage returns an image corresponding to the image referred to by refOrID. func (i *ImageService) GetImage(ctx context.Context, refOrID string, options imagetype.GetImageOpts) (*image.Image, error) { - ii, img, err := i.getImage(ctx, refOrID, options.Platform) + ii, img, err := i.getImage(ctx, refOrID) if err != nil { return nil, err } @@ -49,7 +49,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima return img, err } -func (i *ImageService) getImage(ctx context.Context, refOrID string, platform *ocispec.Platform) (containerd.Image, *image.Image, error) { +func (i *ImageService) getImage(ctx context.Context, refOrID string) (containerd.Image, *image.Image, error) { desc, err := i.ResolveImage(ctx, refOrID) if err != nil { return nil, nil, err diff --git a/daemon/containerd/image_commit.go b/daemon/containerd/image_commit.go index bb5a6cc348f6c..113e39f49646c 100644 --- a/daemon/containerd/image_commit.go +++ b/daemon/containerd/image_commit.go @@ -1,13 +1,306 @@ package containerd import ( + "bytes" + "context" + "crypto/rand" + "encoding/base64" + "encoding/json" + "fmt" + "runtime" + "time" + + "github.com/containerd/containerd" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/rootfs" + "github.com/containerd/containerd/snapshots" "github.com/docker/docker/api/types/backend" + containerapi 
"github.com/docker/docker/api/types/container" "github.com/docker/docker/image" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/identity" + "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" ) -// CommitImage creates a new image from a commit config. -func (i *ImageService) CommitImage(c backend.CommitConfig) (image.ID, error) { - panic("not implemented") +/* +This code is based on `commit` support in nerdctl, under Apache License +https://github.com/containerd/nerdctl/blob/master/pkg/imgutil/commit/commit.go +with adaptations to match the Moby data model and services. +*/ + +func (i *ImageService) CommitImage(ctx context.Context, cc backend.CommitConfig) (image.ID, error) { + container := i.containers.Get(cc.ContainerID) + + cimg, _, err := i.getImage(ctx, container.Config.Image) + if err != nil { + return "", err + } + + baseImgWithoutPlatform, err := i.client.ImageService().Get(ctx, cimg.Name()) + if err != nil { + return "", err + } + + baseImg := containerd.NewImageWithPlatform(i.client, baseImgWithoutPlatform, platforms.DefaultStrict()) + + contentStore := baseImg.ContentStore() + conf, err := baseImg.Config(ctx) + if err != nil { + return "", err + } + imageConfigBytes, err := content.ReadBlob(ctx, baseImg.ContentStore(), conf) + if err != nil { + return "", err + } + var ociimage ocispec.Image + if err := json.Unmarshal(imageConfigBytes, &ociimage); err != nil { + return "", err + } + + target := baseImg.Target() + b, err := content.ReadBlob(ctx, contentStore, target) + var ocimanifest ocispec.Manifest + if err := json.Unmarshal(b, &ocimanifest); err != nil { + return "", err + } + + var ( + differ = i.client.DiffService() + sn = i.client.SnapshotService(containerd.DefaultSnapshotter) + ) + + // Don't gc me and clean the dirty data after 1 hour! 
+ ctx, done, err := i.client.WithLease(ctx, leases.WithRandomID(), leases.WithExpiration(1*time.Hour)) + if err != nil { + return "", fmt.Errorf("failed to create lease for commit: %w", err) + } + defer done(ctx) + + diffLayerDesc, diffID, err := createDiff(ctx, cc.ContainerID, sn, contentStore, differ) + if err != nil { + return "", fmt.Errorf("failed to export layer: %w", err) + } + + imageConfig, err := generateCommitImageConfig(ctx, container.Config, ociimage, diffID, cc) + if err != nil { + return "", fmt.Errorf("failed to generate commit image config: %w", err) + } + + rootfsID := identity.ChainID(imageConfig.RootFS.DiffIDs).String() + if err := applyDiffLayer(ctx, rootfsID, ociimage, sn, differ, diffLayerDesc); err != nil { + return "", fmt.Errorf("failed to apply diff: %w", err) + } + + layers := append(ocimanifest.Layers, diffLayerDesc) + commitManifestDesc, configDigest, err := writeContentsForImage(ctx, containerd.DefaultSnapshotter, baseImg, imageConfig, layers) + if err != nil { + return "", err + } + + // image create + img := images.Image{ + Name: configDigest.String(), + Target: commitManifestDesc, + CreatedAt: time.Now(), + } + + if _, err := i.client.ImageService().Update(ctx, img); err != nil { + if !errdefs.IsNotFound(err) { + return "", err + } + + if _, err := i.client.ImageService().Create(ctx, img); err != nil { + return "", fmt.Errorf("failed to create new image: %w", err) + } + } + return image.ID(img.Target.Digest), nil +} + +// generateCommitImageConfig returns commit oci image config based on the container's image. 
+func generateCommitImageConfig(ctx context.Context, container *containerapi.Config, baseConfig ocispec.Image, diffID digest.Digest, opts backend.CommitConfig) (ocispec.Image, error) { + if opts.Config.Cmd != nil { + baseConfig.Config.Cmd = opts.Config.Cmd + } + if opts.Config.Entrypoint != nil { + baseConfig.Config.Entrypoint = opts.Config.Entrypoint + } + if opts.Author == "" { + opts.Author = baseConfig.Author + } + + createdTime := time.Now() + arch := baseConfig.Architecture + if arch == "" { + arch = runtime.GOARCH + logrus.Warnf("assuming arch=%q", arch) + } + os := baseConfig.OS + if os == "" { + os = runtime.GOOS + logrus.Warnf("assuming os=%q", os) + } + logrus.Debugf("generateCommitImageConfig(): arch=%q, os=%q", arch, os) + return ocispec.Image{ + Architecture: arch, + OS: os, + Created: &createdTime, + Author: opts.Author, + Config: baseConfig.Config, + RootFS: ocispec.RootFS{ + Type: "layers", + DiffIDs: append(baseConfig.RootFS.DiffIDs, diffID), + }, + History: append(baseConfig.History, ocispec.History{ + Created: &createdTime, + CreatedBy: "", // FIXME(ndeloof) ? + Author: opts.Author, + Comment: opts.Comment, + EmptyLayer: diffID == "", + }), + }, nil +} + +// writeContentsForImage will commit oci image config and manifest into containerd's content store. 
+func writeContentsForImage(ctx context.Context, snName string, baseImg containerd.Image, newConfig ocispec.Image, layers []ocispec.Descriptor) (ocispec.Descriptor, image.ID, error) { + newConfigJSON, err := json.Marshal(newConfig) + if err != nil { + return ocispec.Descriptor{}, "", err + } + + configDesc := ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2Config, + Digest: digest.FromBytes(newConfigJSON), + Size: int64(len(newConfigJSON)), + } + + newMfst := struct { + MediaType string `json:"mediaType,omitempty"` + ocispec.Manifest + }{ + MediaType: images.MediaTypeDockerSchema2Manifest, + Manifest: ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: configDesc, + Layers: layers, + }, + } + + newMfstJSON, err := json.MarshalIndent(newMfst, "", " ") + if err != nil { + return ocispec.Descriptor{}, "", err + } + + newMfstDesc := ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2Manifest, + Digest: digest.FromBytes(newMfstJSON), + Size: int64(len(newMfstJSON)), + } + + // new manifest should reference the layers and config content + labels := map[string]string{ + "containerd.io/gc.ref.content.0": configDesc.Digest.String(), + } + for i, l := range layers { + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = l.Digest.String() + } + + err = content.WriteBlob(ctx, baseImg.ContentStore(), newMfstDesc.Digest.String(), bytes.NewReader(newMfstJSON), newMfstDesc, content.WithLabels(labels)) + if err != nil { + return ocispec.Descriptor{}, "", err + } + + // config should reference to snapshotter + labelOpt := content.WithLabels(map[string]string{ + fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", snName): identity.ChainID(newConfig.RootFS.DiffIDs).String(), + }) + err = content.WriteBlob(ctx, baseImg.ContentStore(), configDesc.Digest.String(), bytes.NewReader(newConfigJSON), configDesc, labelOpt) + if err != nil { + return ocispec.Descriptor{}, "", err + } + + return newMfstDesc, 
image.ID(configDesc.Digest), nil +} + +// createDiff creates a layer diff into containerd's content store. +func createDiff(ctx context.Context, name string, sn snapshots.Snapshotter, cs content.Store, comparer diff.Comparer) (ocispec.Descriptor, digest.Digest, error) { + newDesc, err := rootfs.CreateDiff(ctx, name, sn, comparer) + if err != nil { + return ocispec.Descriptor{}, digest.Digest(""), err + } + + info, err := cs.Info(ctx, newDesc.Digest) + if err != nil { + return ocispec.Descriptor{}, digest.Digest(""), err + } + + diffIDStr, ok := info.Labels["containerd.io/uncompressed"] + if !ok { + return ocispec.Descriptor{}, digest.Digest(""), fmt.Errorf("invalid differ response with no diffID") + } + + diffID, err := digest.Parse(diffIDStr) + if err != nil { + return ocispec.Descriptor{}, digest.Digest(""), err + } + + return ocispec.Descriptor{ + MediaType: images.MediaTypeDockerSchema2LayerGzip, + Digest: newDesc.Digest, + Size: info.Size, + }, diffID, nil +} + +// applyDiffLayer will apply diff layer content created by createDiff into the snapshotter. +func applyDiffLayer(ctx context.Context, name string, baseImg ocispec.Image, sn snapshots.Snapshotter, differ diff.Applier, diffDesc ocispec.Descriptor) (retErr error) { + var ( + key = uniquePart() + "-" + name + parent = identity.ChainID(baseImg.RootFS.DiffIDs).String() + ) + + mount, err := sn.Prepare(ctx, key, parent) + if err != nil { + return err + } + + defer func() { + if retErr != nil { + // NOTE: the snapshotter should be hold by lease. Even + // if the cleanup fails, the containerd gc can delete it. 
+ if err := sn.Remove(ctx, key); err != nil { + logrus.Warnf("failed to cleanup aborted apply %s: %s", key, err) + } + } + }() + + if _, err = differ.Apply(ctx, diffDesc, mount); err != nil { + return err + } + + if err = sn.Commit(ctx, name, key); err != nil { + if errdefs.IsAlreadyExists(err) { + return nil + } + return err + } + return nil +} + +// copied from github.com/containerd/containerd/rootfs/apply.go +func uniquePart() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:])) } // CommitBuildStep is used by the builder to create an image for each step in @@ -19,6 +312,6 @@ func (i *ImageService) CommitImage(c backend.CommitConfig) (image.ID, error) { // - it doesn't log a container commit event // // This is a temporary shim. Should be removed when builder stops using commit. -func (i *ImageService) CommitBuildStep(c backend.CommitConfig) (image.ID, error) { +func (i *ImageService) CommitBuildStep(ctx context.Context, c backend.CommitConfig) (image.ID, error) { panic("not implemented") } diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index 1c0686e74423a..bc9ff60a68648 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -19,14 +19,16 @@ import ( // ImageService implements daemon.ImageService type ImageService struct { - client *containerd.Client - usage singleflight.Group + client *containerd.Client + usage singleflight.Group + containers container.Store } // NewService creates a new ImageService. 
-func NewService(c *containerd.Client) *ImageService { +func NewService(c *containerd.Client, containers container.Store) *ImageService { return &ImageService{ - client: c, + client: c, + containers: containers, } } diff --git a/daemon/daemon.go b/daemon/daemon.go index 6d104821042fe..45e2f0f675d38 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -1027,7 +1027,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S d.linkIndex = newLinkIndex() if d.UsesSnapshotter() { - d.imageService = ctrd.NewService(d.containerdCli) + d.imageService = ctrd.NewService(d.containerdCli, d.containers) } else { ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) if err != nil { diff --git a/daemon/image_service.go b/daemon/image_service.go index 9eae28ef8fe8a..83f382af3f139 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -40,7 +40,7 @@ type ImageService interface { TagImageWithReference(ctx context.Context, imageID image.ID, newTag reference.Named) error GetImage(ctx context.Context, refOrID string, options imagetype.GetImageOpts) (*image.Image, error) ImageHistory(ctx context.Context, name string) ([]*imagetype.HistoryResponseItem, error) - CommitImage(c backend.CommitConfig) (image.ID, error) + CommitImage(ctx context.Context, c backend.CommitConfig) (image.ID, error) SquashImage(id, parent string) (string, error) // Layers @@ -61,7 +61,7 @@ type ImageService interface { // Build MakeImageCache(ctx context.Context, cacheFrom []string) builder.ImageCache - CommitBuildStep(c backend.CommitConfig) (image.ID, error) + CommitBuildStep(ctx context.Context, c backend.CommitConfig) (image.ID, error) // Other diff --git a/daemon/images/image_commit.go b/daemon/images/image_commit.go index dcde88adc4690..c6924d9f62124 100644 --- a/daemon/images/image_commit.go +++ b/daemon/images/image_commit.go @@ -1,6 +1,7 @@ package images // import "github.com/docker/docker/daemon/images" import ( + "context" "encoding/json" 
"io" @@ -12,7 +13,7 @@ import ( ) // CommitImage creates a new image from a commit config -func (i *ImageService) CommitImage(c backend.CommitConfig) (image.ID, error) { +func (i *ImageService) CommitImage(ctx context.Context, c backend.CommitConfig) (image.ID, error) { rwTar, err := exportContainerRw(i.layerStore, c.ContainerID, c.ContainerMountLabel) if err != nil { return "", err @@ -109,7 +110,7 @@ func exportContainerRw(layerStore layer.Store, id, mountLabel string) (arch io.R // - it doesn't log a container commit event // // This is a temporary shim. Should be removed when builder stops using commit. -func (i *ImageService) CommitBuildStep(c backend.CommitConfig) (image.ID, error) { +func (i *ImageService) CommitBuildStep(ctx context.Context, c backend.CommitConfig) (image.ID, error) { container := i.containers.Get(c.ContainerID) if container == nil { // TODO: use typed error @@ -118,5 +119,5 @@ func (i *ImageService) CommitBuildStep(c backend.CommitConfig) (image.ID, error) c.ContainerMountLabel = container.MountLabel c.ContainerOS = container.OS c.ParentImageID = string(container.ImageID) - return i.CommitImage(c) + return i.CommitImage(ctx, c) } From 3868cb5b67007b5b449adb43a890eaf61ff4e5f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Thu, 4 Aug 2022 10:47:55 +0200 Subject: [PATCH 26/90] run hack/vendor.sh MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move opentelemetry dependencies to direct Signed-off-by: Paweł Gronowski --- vendor.mod | 4 +- .../moby/buildkit/control/control.go | 502 ------------------ .../github.com/moby/buildkit/control/init.go | 10 - vendor/modules.txt | 1 - 4 files changed, 2 insertions(+), 515 deletions(-) delete mode 100644 vendor/github.com/moby/buildkit/control/control.go delete mode 100644 vendor/github.com/moby/buildkit/control/init.go diff --git a/vendor.mod b/vendor.mod index c873bfee0050b..8dd540ada5701 100644 --- a/vendor.mod +++ b/vendor.mod @@ -78,6 
+78,8 @@ require ( github.com/vishvananda/netlink v1.2.1-beta.2 github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f go.etcd.io/bbolt v1.3.6 + go.opentelemetry.io/otel/sdk v1.4.1 + go.opentelemetry.io/proto/otlp v0.12.0 golang.org/x/net v0.0.0-20211216030914-fe4d6282115f golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a @@ -150,9 +152,7 @@ require ( go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect go.opentelemetry.io/otel/metric v0.27.0 // indirect - go.opentelemetry.io/otel/sdk v1.4.1 // indirect go.opentelemetry.io/otel/trace v1.4.1 // indirect - go.opentelemetry.io/proto/otlp v0.12.0 // indirect go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.17.0 // indirect diff --git a/vendor/github.com/moby/buildkit/control/control.go b/vendor/github.com/moby/buildkit/control/control.go deleted file mode 100644 index 0d3e7976e5b77..0000000000000 --- a/vendor/github.com/moby/buildkit/control/control.go +++ /dev/null @@ -1,502 +0,0 @@ -package control - -import ( - "context" - "sync" - "sync/atomic" - "time" - - "github.com/moby/buildkit/util/bklog" - - controlapi "github.com/moby/buildkit/api/services/control" - apitypes "github.com/moby/buildkit/api/types" - "github.com/moby/buildkit/cache/remotecache" - "github.com/moby/buildkit/client" - controlgateway "github.com/moby/buildkit/control/gateway" - "github.com/moby/buildkit/exporter" - "github.com/moby/buildkit/frontend" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/grpchijack" - "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/solver/llbsolver" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/imageutil" - "github.com/moby/buildkit/util/throttle" - "github.com/moby/buildkit/util/tracing/transform" - "github.com/moby/buildkit/worker" - "github.com/pkg/errors" - sdktrace 
"go.opentelemetry.io/otel/sdk/trace" - tracev1 "go.opentelemetry.io/proto/otlp/collector/trace/v1" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type Opt struct { - SessionManager *session.Manager - WorkerController *worker.Controller - Frontends map[string]frontend.Frontend - CacheKeyStorage solver.CacheKeyStorage - ResolveCacheExporterFuncs map[string]remotecache.ResolveCacheExporterFunc - ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc - Entitlements []string - TraceCollector sdktrace.SpanExporter -} - -type Controller struct { // TODO: ControlService - // buildCount needs to be 64bit aligned - buildCount int64 - opt Opt - solver *llbsolver.Solver - cache solver.CacheManager - gatewayForwarder *controlgateway.GatewayForwarder - throttledGC func() - gcmu sync.Mutex - *tracev1.UnimplementedTraceServiceServer -} - -func NewController(opt Opt) (*Controller, error) { - cache := solver.NewCacheManager(context.TODO(), "local", opt.CacheKeyStorage, worker.NewCacheResultStorage(opt.WorkerController)) - - gatewayForwarder := controlgateway.NewGatewayForwarder() - - solver, err := llbsolver.New(opt.WorkerController, opt.Frontends, cache, opt.ResolveCacheImporterFuncs, gatewayForwarder, opt.SessionManager, opt.Entitlements) - if err != nil { - return nil, errors.Wrap(err, "failed to create solver") - } - - c := &Controller{ - opt: opt, - solver: solver, - cache: cache, - gatewayForwarder: gatewayForwarder, - } - c.throttledGC = throttle.After(time.Minute, c.gc) - - defer func() { - time.AfterFunc(time.Second, c.throttledGC) - }() - - return c, nil -} - -func (c *Controller) Register(server *grpc.Server) error { - controlapi.RegisterControlServer(server, c) - c.gatewayForwarder.Register(server) - tracev1.RegisterTraceServiceServer(server, c) - return nil -} - -func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageRequest) 
(*controlapi.DiskUsageResponse, error) { - resp := &controlapi.DiskUsageResponse{} - workers, err := c.opt.WorkerController.List() - if err != nil { - return nil, err - } - for _, w := range workers { - du, err := w.DiskUsage(ctx, client.DiskUsageInfo{ - Filter: r.Filter, - }) - if err != nil { - return nil, err - } - - for _, r := range du { - resp.Record = append(resp.Record, &controlapi.UsageRecord{ - // TODO: add worker info - ID: r.ID, - Mutable: r.Mutable, - InUse: r.InUse, - Size_: r.Size, - Parents: r.Parents, - UsageCount: int64(r.UsageCount), - Description: r.Description, - CreatedAt: r.CreatedAt, - LastUsedAt: r.LastUsedAt, - RecordType: string(r.RecordType), - Shared: r.Shared, - }) - } - } - return resp, nil -} - -func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Control_PruneServer) error { - if atomic.LoadInt64(&c.buildCount) == 0 { - imageutil.CancelCacheLeases() - } - - ch := make(chan client.UsageInfo) - - eg, ctx := errgroup.WithContext(stream.Context()) - workers, err := c.opt.WorkerController.List() - if err != nil { - return errors.Wrap(err, "failed to list workers for prune") - } - - didPrune := false - defer func() { - if didPrune { - if c, ok := c.cache.(interface { - ReleaseUnreferenced() error - }); ok { - if err := c.ReleaseUnreferenced(); err != nil { - bklog.G(ctx).Errorf("failed to release cache metadata: %+v", err) - } - } - } - }() - - for _, w := range workers { - func(w worker.Worker) { - eg.Go(func() error { - return w.Prune(ctx, ch, client.PruneInfo{ - Filter: req.Filter, - All: req.All, - KeepDuration: time.Duration(req.KeepDuration), - KeepBytes: req.KeepBytes, - }) - }) - }(w) - } - - eg2, _ := errgroup.WithContext(stream.Context()) - - eg2.Go(func() error { - defer close(ch) - return eg.Wait() - }) - - eg2.Go(func() error { - for r := range ch { - didPrune = true - if err := stream.Send(&controlapi.UsageRecord{ - // TODO: add worker info - ID: r.ID, - Mutable: r.Mutable, - InUse: r.InUse, - Size_: 
r.Size, - Parents: r.Parents, - UsageCount: int64(r.UsageCount), - Description: r.Description, - CreatedAt: r.CreatedAt, - LastUsedAt: r.LastUsedAt, - RecordType: string(r.RecordType), - Shared: r.Shared, - }); err != nil { - return err - } - } - return nil - }) - - return eg2.Wait() -} - -func (c *Controller) Export(ctx context.Context, req *tracev1.ExportTraceServiceRequest) (*tracev1.ExportTraceServiceResponse, error) { - if c.opt.TraceCollector == nil { - return nil, status.Errorf(codes.Unavailable, "trace collector not configured") - } - err := c.opt.TraceCollector.ExportSpans(ctx, transform.Spans(req.GetResourceSpans())) - if err != nil { - return nil, err - } - return &tracev1.ExportTraceServiceResponse{}, nil -} - -func translateLegacySolveRequest(req *controlapi.SolveRequest) error { - // translates ExportRef and ExportAttrs to new Exports (v0.4.0) - if legacyExportRef := req.Cache.ExportRefDeprecated; legacyExportRef != "" { - ex := &controlapi.CacheOptionsEntry{ - Type: "registry", - Attrs: req.Cache.ExportAttrsDeprecated, - } - if ex.Attrs == nil { - ex.Attrs = make(map[string]string) - } - ex.Attrs["ref"] = legacyExportRef - // FIXME(AkihiroSuda): skip append if already exists - req.Cache.Exports = append(req.Cache.Exports, ex) - req.Cache.ExportRefDeprecated = "" - req.Cache.ExportAttrsDeprecated = nil - } - // translates ImportRefs to new Imports (v0.4.0) - for _, legacyImportRef := range req.Cache.ImportRefsDeprecated { - im := &controlapi.CacheOptionsEntry{ - Type: "registry", - Attrs: map[string]string{"ref": legacyImportRef}, - } - // FIXME(AkihiroSuda): skip append if already exists - req.Cache.Imports = append(req.Cache.Imports, im) - } - req.Cache.ImportRefsDeprecated = nil - return nil -} - -func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (*controlapi.SolveResponse, error) { - atomic.AddInt64(&c.buildCount, 1) - defer atomic.AddInt64(&c.buildCount, -1) - - // This method registers job ID in solver.Solve. 
Make sure there are no blocking calls before that might delay this. - - if err := translateLegacySolveRequest(req); err != nil { - return nil, err - } - - defer func() { - time.AfterFunc(time.Second, c.throttledGC) - }() - - var expi exporter.ExporterInstance - // TODO: multiworker - // This is actually tricky, as the exporter should come from the worker that has the returned reference. We may need to delay this so that the solver loads this. - w, err := c.opt.WorkerController.GetDefault() - if err != nil { - return nil, err - } - if req.Exporter != "" { - exp, err := w.Exporter(req.Exporter, c.opt.SessionManager) - if err != nil { - return nil, err - } - expi, err = exp.Resolve(ctx, req.ExporterAttrs) - if err != nil { - return nil, err - } - } - - var ( - cacheExporter remotecache.Exporter - cacheExportMode solver.CacheExportMode - cacheImports []frontend.CacheOptionsEntry - ) - if len(req.Cache.Exports) > 1 { - // TODO(AkihiroSuda): this should be fairly easy - return nil, errors.New("specifying multiple cache exports is not supported currently") - } - - if len(req.Cache.Exports) == 1 { - e := req.Cache.Exports[0] - cacheExporterFunc, ok := c.opt.ResolveCacheExporterFuncs[e.Type] - if !ok { - return nil, errors.Errorf("unknown cache exporter: %q", e.Type) - } - cacheExporter, err = cacheExporterFunc(ctx, session.NewGroup(req.Session), e.Attrs) - if err != nil { - return nil, err - } - if exportMode, supported := parseCacheExportMode(e.Attrs["mode"]); !supported { - bklog.G(ctx).Debugf("skipping invalid cache export mode: %s", e.Attrs["mode"]) - } else { - cacheExportMode = exportMode - } - } - for _, im := range req.Cache.Imports { - cacheImports = append(cacheImports, frontend.CacheOptionsEntry{ - Type: im.Type, - Attrs: im.Attrs, - }) - } - - resp, err := c.solver.Solve(ctx, req.Ref, req.Session, frontend.SolveRequest{ - Frontend: req.Frontend, - Definition: req.Definition, - FrontendOpt: req.FrontendAttrs, - FrontendInputs: req.FrontendInputs, - CacheImports: 
cacheImports, - }, llbsolver.ExporterRequest{ - Exporter: expi, - CacheExporter: cacheExporter, - CacheExportMode: cacheExportMode, - }, req.Entitlements) - if err != nil { - return nil, err - } - return &controlapi.SolveResponse{ - ExporterResponse: resp.ExporterResponse, - }, nil -} - -func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Control_StatusServer) error { - ch := make(chan *client.SolveStatus, 8) - - eg, ctx := errgroup.WithContext(stream.Context()) - eg.Go(func() error { - return c.solver.Status(ctx, req.Ref, ch) - }) - - eg.Go(func() error { - for { - ss, ok := <-ch - if !ok { - return nil - } - logSize := 0 - for { - retry := false - sr := controlapi.StatusResponse{} - for _, v := range ss.Vertexes { - sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, - Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, - ProgressGroup: v.ProgressGroup, - }) - } - for _, v := range ss.Statuses { - sr.Statuses = append(sr.Statuses, &controlapi.VertexStatus{ - ID: v.ID, - Vertex: v.Vertex, - Name: v.Name, - Current: v.Current, - Total: v.Total, - Timestamp: v.Timestamp, - Started: v.Started, - Completed: v.Completed, - }) - } - for i, v := range ss.Logs { - sr.Logs = append(sr.Logs, &controlapi.VertexLog{ - Vertex: v.Vertex, - Stream: int64(v.Stream), - Msg: v.Data, - Timestamp: v.Timestamp, - }) - logSize += len(v.Data) + emptyLogVertexSize - // avoid logs growing big and split apart if they do - if logSize > 1024*1024 { - ss.Vertexes = nil - ss.Statuses = nil - ss.Logs = ss.Logs[i+1:] - retry = true - break - } - } - for _, v := range ss.Warnings { - sr.Warnings = append(sr.Warnings, &controlapi.VertexWarning{ - Vertex: v.Vertex, - Level: int64(v.Level), - Short: v.Short, - Detail: v.Detail, - Info: v.SourceInfo, - Ranges: v.Range, - Url: v.URL, - }) - } - if err := stream.SendMsg(&sr); err != nil { - return err - } - if !retry { - break - } - } - } 
- }) - - return eg.Wait() -} - -func (c *Controller) Session(stream controlapi.Control_SessionServer) error { - bklog.G(stream.Context()).Debugf("session started") - - conn, closeCh, opts := grpchijack.Hijack(stream) - defer conn.Close() - - ctx, cancel := context.WithCancel(stream.Context()) - go func() { - <-closeCh - cancel() - }() - - err := c.opt.SessionManager.HandleConn(ctx, conn, opts) - bklog.G(ctx).Debugf("session finished: %v", err) - return err -} - -func (c *Controller) ListWorkers(ctx context.Context, r *controlapi.ListWorkersRequest) (*controlapi.ListWorkersResponse, error) { - resp := &controlapi.ListWorkersResponse{} - workers, err := c.opt.WorkerController.List(r.Filter...) - if err != nil { - return nil, err - } - for _, w := range workers { - resp.Record = append(resp.Record, &apitypes.WorkerRecord{ - ID: w.ID(), - Labels: w.Labels(), - Platforms: pb.PlatformsFromSpec(w.Platforms(true)), - GCPolicy: toPBGCPolicy(w.GCPolicy()), - }) - } - return resp, nil -} - -func (c *Controller) gc() { - c.gcmu.Lock() - defer c.gcmu.Unlock() - - workers, err := c.opt.WorkerController.List() - if err != nil { - return - } - - eg, ctx := errgroup.WithContext(context.TODO()) - - var size int64 - ch := make(chan client.UsageInfo) - done := make(chan struct{}) - go func() { - for ui := range ch { - size += ui.Size - } - close(done) - }() - - for _, w := range workers { - func(w worker.Worker) { - eg.Go(func() error { - if policy := w.GCPolicy(); len(policy) > 0 { - return w.Prune(ctx, ch, policy...) 
- } - return nil - }) - }(w) - } - - err = eg.Wait() - close(ch) - if err != nil { - bklog.G(ctx).Errorf("gc error: %+v", err) - } - <-done - if size > 0 { - bklog.G(ctx).Debugf("gc cleaned up %d bytes", size) - } -} - -func parseCacheExportMode(mode string) (solver.CacheExportMode, bool) { - switch mode { - case "min": - return solver.CacheExportModeMin, true - case "max": - return solver.CacheExportModeMax, true - } - return solver.CacheExportModeMin, false -} - -func toPBGCPolicy(in []client.PruneInfo) []*apitypes.GCPolicy { - policy := make([]*apitypes.GCPolicy, 0, len(in)) - for _, p := range in { - policy = append(policy, &apitypes.GCPolicy{ - All: p.All, - KeepBytes: p.KeepBytes, - KeepDuration: int64(p.KeepDuration), - Filters: p.Filter, - }) - } - return policy -} diff --git a/vendor/github.com/moby/buildkit/control/init.go b/vendor/github.com/moby/buildkit/control/init.go deleted file mode 100644 index 2e86133e4120e..0000000000000 --- a/vendor/github.com/moby/buildkit/control/init.go +++ /dev/null @@ -1,10 +0,0 @@ -package control - -import controlapi "github.com/moby/buildkit/api/services/control" - -var emptyLogVertexSize int - -func init() { - emptyLogVertex := controlapi.VertexLog{} - emptyLogVertexSize = emptyLogVertex.Size() -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 900babefe8517..542dbc249c834 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -513,7 +513,6 @@ github.com/moby/buildkit/client/connhelper github.com/moby/buildkit/client/llb github.com/moby/buildkit/client/llb/imagemetaresolver github.com/moby/buildkit/client/ociindex -github.com/moby/buildkit/control github.com/moby/buildkit/control/gateway github.com/moby/buildkit/executor github.com/moby/buildkit/executor/containerdexecutor From 8da3075a58a526d2f62570ca8d5454e52040a158 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Thu, 4 Aug 2022 11:12:43 +0200 Subject: [PATCH 27/90] Fix linting issues Signed-off-by: Djordje Lukic --- daemon/containerd/image.go | 2 
++ daemon/containerd/progress.go | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index ee19399379391..a2b74ceca1310 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -23,6 +23,8 @@ import ( var shortID = regexp.MustCompile(`^([a-f0-9]{4,64})$`) +// GetContainerdImage returns the containerd image corresponding to the image referred to by refOrID. +// The platform parameter is currently ignored func (i *ImageService) GetContainerdImage(ctx context.Context, refOrID string, platform *ocispec.Platform) (containerdimages.Image, error) { return i.resolveImageName2(ctx, refOrID) } diff --git a/daemon/containerd/progress.go b/daemon/containerd/progress.go index b71f0971fca55..d159a76d1d5a8 100644 --- a/daemon/containerd/progress.go +++ b/daemon/containerd/progress.go @@ -160,7 +160,6 @@ func pullProgress(cs content.Store) updateProgressFunc { } type jobs struct { - name string resolved bool // resolved is set to true once all jobs are added descs map[digest.Digest]ocispec.Descriptor mu sync.Mutex From 8c57c43f3eee7e80e0ed095d7f8c0b8c9a88e89a Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Mon, 8 Aug 2022 14:06:45 +0200 Subject: [PATCH 28/90] Fix `docker system df` This change fixes multiple things: * pass ctx instead of nil because we need it while computing the layer size * don't return an error if the container is not found during the layer size calculation, if a container is not found it means that it exited * create the container with the image it was created from, the image is used when computing the layer size Signed-off-by: Djordje Lukic --- daemon/containerd/service.go | 4 ++++ daemon/disk_usage.go | 2 +- daemon/start.go | 7 +++++++ 3 files changed, 12 insertions(+), 1 deletion(-) diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index bc9ff60a68648..a2a80e78bc88f 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go 
@@ -5,6 +5,7 @@ import ( "fmt" "github.com/containerd/containerd" + cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/snapshots" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" @@ -183,6 +184,9 @@ func (i *ImageService) GetContainerLayerSize(ctx context.Context, containerID st c, err := i.client.ContainerService().Get(ctx, containerID) if err != nil { + if cerrdefs.IsNotFound(err) { + return 0, 0, nil + } return 0, 0, err } image, err := i.client.GetImage(ctx, c.Image) diff --git a/daemon/disk_usage.go b/daemon/disk_usage.go index c61234d6547e5..a00b1a0465bad 100644 --- a/daemon/disk_usage.go +++ b/daemon/disk_usage.go @@ -14,7 +14,7 @@ import ( func (daemon *Daemon) ContainerDiskUsage(ctx context.Context) ([]*types.Container, error) { ch := daemon.usage.DoChan("ContainerDiskUsage", func() (interface{}, error) { // Retrieve container list - containers, err := daemon.Containers(nil, &types.ContainerListOptions{ + containers, err := daemon.Containers(ctx, &types.ContainerListOptions{ Size: true, All: true, }) diff --git a/daemon/start.go b/daemon/start.go index f8b5e7ed2bf06..21385af074d6b 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -10,6 +10,7 @@ import ( containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" "github.com/docker/docker/errdefs" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -181,6 +182,12 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C if daemon.UsesSnapshotter() { newContainerOpts = append(newContainerOpts, containerd.WithSnapshotter(containerd.DefaultSnapshotter)) newContainerOpts = append(newContainerOpts, containerd.WithSnapshot(container.ID)) + c8dImge, err := daemon.imageService.(containerdImage).GetContainerdImage(ctx, container.Config.Image, &v1.Platform{}) + if err != nil { + return err + } + ctrdimg := 
containerd.NewImage(daemon.containerdCli, c8dImge) + newContainerOpts = append(newContainerOpts, containerd.WithImage(ctrdimg)) } err = daemon.containerd.Create(ctx, container.ID, spec, shim, createOptions, newContainerOpts...) From 99655cc01035ff599216449d1108430a7edeed16 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Wed, 3 Aug 2022 11:20:54 +0200 Subject: [PATCH 29/90] Make the snapshotter configurable Signed-off-by: Djordje Lukic --- daemon/config/config.go | 4 ++++ daemon/containerd/image_exporter.go | 4 ++-- daemon/containerd/image_list.go | 2 +- daemon/containerd/image_pull.go | 4 ++-- daemon/containerd/service.go | 18 ++++++++++-------- daemon/create.go | 2 +- daemon/daemon.go | 3 ++- daemon/oci_linux.go | 14 +++++++++++--- daemon/start.go | 2 +- 9 files changed, 34 insertions(+), 19 deletions(-) diff --git a/daemon/config/config.go b/daemon/config/config.go index 645beb8c032f6..25fc5091aba1c 100644 --- a/daemon/config/config.go +++ b/daemon/config/config.go @@ -10,6 +10,7 @@ import ( "strings" "sync" + "github.com/containerd/containerd" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/registry" @@ -51,6 +52,9 @@ const ( // DefaultPluginNamespace is the name of the default containerd namespace used for plugins. 
DefaultPluginNamespace = "plugins.moby" + // DefaultContainerdSnapshotter is the name of the default containerd snapshotter used for creating container root fs + DefaultContainerdSnapshotter = containerd.DefaultSnapshotter + // LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim LinuxV2RuntimeName = "io.containerd.runc.v2" diff --git a/daemon/containerd/image_exporter.go b/daemon/containerd/image_exporter.go index 758f57c2af149..679f3b1c2ec24 100644 --- a/daemon/containerd/image_exporter.go +++ b/daemon/containerd/image_exporter.go @@ -31,14 +31,14 @@ func (i *ImageService) LoadImage(ctx context.Context, inTar io.ReadCloser, outSt for _, img := range imgs { platformImg := containerd.NewImageWithPlatform(i.client, img, platform) - unpacked, err := platformImg.IsUnpacked(ctx, containerd.DefaultSnapshotter) + unpacked, err := platformImg.IsUnpacked(ctx, i.snapshotter) if err != nil { logrus.WithError(err).WithField("image", img.Name).Error("IsUnpacked failed") continue } if !unpacked { - err := platformImg.Unpack(ctx, containerd.DefaultSnapshotter) + err := platformImg.Unpack(ctx, i.snapshotter) if err != nil { logrus.WithError(err).WithField("image", img.Name).Error("Failed to unpack image") return errors.Wrapf(err, "Failed to unpack image") diff --git a/daemon/containerd/image_list.go b/daemon/containerd/image_list.go index 05c040a3c7caf..0818d34533478 100644 --- a/daemon/containerd/image_list.go +++ b/daemon/containerd/image_list.go @@ -37,7 +37,7 @@ func (i *ImageService) Images(ctx context.Context, opts types.ImageListOptions) return nil, err } - snapshotter := i.client.SnapshotService(containerd.DefaultSnapshotter) + snapshotter := i.client.SnapshotService(i.snapshotter) sizeCache := make(map[digest.Digest]int64) snapshotSizeFn := func(d digest.Digest) (int64, error) { if s, ok := sizeCache[d]; ok { diff --git a/daemon/containerd/image_pull.go b/daemon/containerd/image_pull.go index bcc6e9ff7e48b..dff6bd0a29b0b 100644 --- 
a/daemon/containerd/image_pull.go +++ b/daemon/containerd/image_pull.go @@ -62,13 +62,13 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, return err } - unpacked, err := img.IsUnpacked(ctx, containerd.DefaultSnapshotter) + unpacked, err := img.IsUnpacked(ctx, i.snapshotter) if err != nil { return err } if !unpacked { - if err := img.Unpack(ctx, containerd.DefaultSnapshotter); err != nil { + if err := img.Unpack(ctx, i.snapshotter); err != nil { return err } } diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index a2a80e78bc88f..514ce66c145c7 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -20,16 +20,18 @@ import ( // ImageService implements daemon.ImageService type ImageService struct { - client *containerd.Client - usage singleflight.Group - containers container.Store + client *containerd.Client + usage singleflight.Group + containers container.Store + snapshotter string } // NewService creates a new ImageService. 
-func NewService(c *containerd.Client, containers container.Store) *ImageService { +func NewService(c *containerd.Client, containers container.Store, snapshotter string) *ImageService { return &ImageService{ - client: c, - containers: containers, + client: c, + containers: containers, + snapshotter: snapshotter, } } @@ -107,7 +109,7 @@ func (i *ImageService) ReleaseLayer(rwlayer layer.RWLayer) error { func (i *ImageService) LayerDiskUsage(ctx context.Context) (int64, error) { ch := i.usage.DoChan("LayerDiskUsage", func() (interface{}, error) { var allLayersSize int64 - snapshotter := i.client.SnapshotService(containerd.DefaultSnapshotter) + snapshotter := i.client.SnapshotService(i.snapshotter) snapshotter.Walk(ctx, func(ctx context.Context, info snapshots.Info) error { usage, err := snapshotter.Usage(ctx, info.Name) if err != nil { @@ -168,7 +170,7 @@ func (i *ImageService) GetLayerFolders(img *image.Image, rwLayer layer.RWLayer) // GetContainerLayerSize returns the real size & virtual size of the container. 
func (i *ImageService) GetContainerLayerSize(ctx context.Context, containerID string) (int64, int64, error) { - snapshotter := i.client.SnapshotService(containerd.DefaultSnapshotter) + snapshotter := i.client.SnapshotService(i.snapshotter) sizeCache := make(map[digest.Digest]int64) snapshotSizeFn := func(d digest.Digest) (int64, error) { if s, ok := sizeCache[d]; ok { diff --git a/daemon/create.go b/daemon/create.go index 5b2ec16c0d6f1..f75bd0bc124c7 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -189,7 +189,7 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai return nil, err } parent := identity.ChainID(diffIDs).String() - s := daemon.containerdCli.SnapshotService(containerd.DefaultSnapshotter) + s := daemon.containerdCli.SnapshotService(daemon.graphDriver) if _, err := s.Prepare(ctx, ctr.ID, parent); err != nil { return nil, err } diff --git a/daemon/daemon.go b/daemon/daemon.go index 45e2f0f675d38..5baf464489afa 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -1027,7 +1027,8 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S d.linkIndex = newLinkIndex() if d.UsesSnapshotter() { - d.imageService = ctrd.NewService(d.containerdCli, d.containers) + d.graphDriver = d.configStore.GraphDriver + d.imageService = ctrd.NewService(d.containerdCli, d.containers, d.graphDriver) } else { ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) if err != nil { diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go index 8a61da2c2cebb..5bdd8dccfcd10 100644 --- a/daemon/oci_linux.go +++ b/daemon/oci_linux.go @@ -11,7 +11,6 @@ import ( "strings" cdcgroups "github.com/containerd/cgroups" - "github.com/containerd/containerd" "github.com/containerd/containerd/containers" coci "github.com/containerd/containerd/oci" "github.com/containerd/containerd/pkg/apparmor" @@ -1061,10 +1060,19 @@ func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (r if 
daemon.configStore.Rootless { opts = append(opts, WithRootless(daemon)) } + + snapshotter := "" + snapshotKey := "" + if daemon.UsesSnapshotter() { + snapshotter = daemon.graphDriver + snapshotKey = c.ID + + } + return &s, coci.ApplyOpts(context.Background(), nil, &containers.Container{ ID: c.ID, - Snapshotter: containerd.DefaultSnapshotter, - SnapshotKey: c.ID, + Snapshotter: snapshotter, + SnapshotKey: snapshotKey, }, &s, opts...) } diff --git a/daemon/start.go b/daemon/start.go index 21385af074d6b..f3318618219b6 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -180,7 +180,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C newContainerOpts := []containerd.NewContainerOpts{} if daemon.UsesSnapshotter() { - newContainerOpts = append(newContainerOpts, containerd.WithSnapshotter(containerd.DefaultSnapshotter)) + newContainerOpts = append(newContainerOpts, containerd.WithSnapshotter(daemon.graphDriver)) newContainerOpts = append(newContainerOpts, containerd.WithSnapshot(container.ID)) c8dImge, err := daemon.imageService.(containerdImage).GetContainerdImage(ctx, container.Config.Image, &v1.Platform{}) if err != nil { From e645f7303786010956853e3d6912070a958fb916 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Fri, 5 Aug 2022 18:09:59 +0200 Subject: [PATCH 30/90] c8d/daemon: Treat (storage/graph)Driver as snapshotter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also moved some layerStore related initialization to the non-c8d case because otherwise they get treated as a graphdriver plugins. 
Co-authored-by: Sebastiaan van Stijn Signed-off-by: Paweł Gronowski Signed-off-by: Sebastiaan van Stijn --- daemon/containerd/service.go | 2 +- daemon/containerd/snapshotters.go | 19 ++++ daemon/containerd/snapshotters_test.go | 58 ++++++++++++ daemon/daemon.go | 119 +++++++++++++------------ daemon/daemon_unix.go | 2 +- 5 files changed, 140 insertions(+), 60 deletions(-) create mode 100644 daemon/containerd/snapshotters.go create mode 100644 daemon/containerd/snapshotters_test.go diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index 514ce66c145c7..563e9a00b29e8 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -95,7 +95,7 @@ func (i *ImageService) Cleanup() error { // - newContainer // - to report an error in Daemon.Mount(container) func (i *ImageService) GraphDriverName() string { - return "containerd-snapshotter" + return i.snapshotter } // ReleaseLayer releases a layer allowing it to be removed diff --git a/daemon/containerd/snapshotters.go b/daemon/containerd/snapshotters.go new file mode 100644 index 0000000000000..bd9e4766827a7 --- /dev/null +++ b/daemon/containerd/snapshotters.go @@ -0,0 +1,19 @@ +package containerd + +import "github.com/containerd/containerd" + +// SnapshotterFromGraphDriver returns the containerd snapshotter name based on +// the supplied graphdriver name. It handles both legacy names and translates +// them into corresponding containerd snapshotter names. 
+func SnapshotterFromGraphDriver(graphDriver string) string { + switch graphDriver { + case "overlay", "overlay2": + return "overlayfs" + case "windowsfilter": + return "windows" + case "": + return containerd.DefaultSnapshotter + default: + return graphDriver + } +} diff --git a/daemon/containerd/snapshotters_test.go b/daemon/containerd/snapshotters_test.go new file mode 100644 index 0000000000000..9d05794bb5cfa --- /dev/null +++ b/daemon/containerd/snapshotters_test.go @@ -0,0 +1,58 @@ +package containerd + +import ( + "testing" + + "github.com/containerd/containerd" + "gotest.tools/v3/assert" +) + +func TestSnapshotterFromGraphDriver(t *testing.T) { + testCases := []struct { + desc string + input string + expected string + }{ + { + desc: "empty defaults to containerd default", + input: "", + expected: containerd.DefaultSnapshotter, + }, + { + desc: "overlay -> overlayfs", + input: "overlay", + expected: "overlayfs", + }, + { + desc: "overlay2 -> overlayfs", + input: "overlay2", + expected: "overlayfs", + }, + { + desc: "windowsfilter -> windows", + input: "windowsfilter", + expected: "windows", + }, + { + desc: "containerd overlayfs", + input: "overlayfs", + expected: "overlayfs", + }, + { + desc: "containerd zfs", + input: "zfs", + expected: "zfs", + }, + { + desc: "unknown is unchanged", + input: "somefuturesnapshotter", + expected: "somefuturesnapshotter", + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.desc, func(t *testing.T) { + assert.Equal(t, SnapshotterFromGraphDriver(tc.input), tc.expected) + }) + } +} diff --git a/daemon/daemon.go b/daemon/daemon.go index 5baf464489afa..e59be1edd5b5a 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -838,21 +838,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S } } - if isWindows { - // On Windows we don't support the environment variable, or a user supplied graphdriver - d.graphDriver = "windowsfilter" - } else { - // Unix platforms however run a single 
graphdriver for all containers, and it can - // be set through an environment variable, a daemon start parameter, or chosen through - // initialization of the layerstore through driver priority order for example. - if drv := os.Getenv("DOCKER_DRIVER"); drv != "" { - d.graphDriver = drv - logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", drv) - } else { - d.graphDriver = config.GraphDriver // May still be empty. Layerstore init determines instead. - } - } - d.registryService = registryService logger.RegisterPluginGetter(d.PluginStore) @@ -942,30 +927,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, err } - layerStore, err := layer.NewStoreFromOptions(layer.StoreOptions{ - Root: config.Root, - MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), - GraphDriver: d.graphDriver, - GraphDriverOptions: config.GraphOptions, - IDMapping: idMapping, - PluginGetter: d.PluginStore, - ExperimentalEnabled: config.Experimental, - }) - if err != nil { - return nil, err - } - - // As layerstore initialization may set the driver - d.graphDriver = layerStore.DriverName() - - // Configure and validate the kernels security support. Note this is a Linux/FreeBSD - // operation only, so it is safe to pass *just* the runtime OS graphdriver. - if err := configureKernelSecuritySupport(config, d.graphDriver); err != nil { - return nil, err - } - - imageRoot := filepath.Join(config.Root, "image", d.graphDriver) - d.volumes, err = volumesservice.NewVolumeService(config.Root, d.PluginStore, rootIDs, d) if err != nil { return nil, err @@ -979,23 +940,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S logrus.WithError(err).Warnf("unable to migrate engine ID; a new engine ID will be generated") } - // We have a single tag/reference store for the daemon globally. However, it's - // stored under the graphdriver. 
On host platforms which only support a single - // container OS, but multiple selectable graphdrivers, this means depending on which - // graphdriver is chosen, the global reference store is under there. For - // platforms which support multiple container operating systems, this is slightly - // more problematic as where does the global ref store get located? Fortunately, - // for Windows, which is currently the only daemon supporting multiple container - // operating systems, the list of graphdrivers available isn't user configurable. - // For backwards compatibility, we just put it under the windowsfilter - // directory regardless. - refStoreLocation := filepath.Join(imageRoot, `repositories.json`) - rs, err := refstore.NewReferenceStore(refStoreLocation) - if err != nil { - return nil, fmt.Errorf("Couldn't create reference store repository: %s", err) - } - d.ReferenceStore = rs - // Check if Devices cgroup is mounted, it is hard requirement for container security, // on Linux. // @@ -1026,15 +970,74 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S d.linkIndex = newLinkIndex() + // On Windows we don't support the environment variable, or a user supplied graphdriver + // Unix platforms however run a single graphdriver for all containers, and it can + // be set through an environment variable, a daemon start parameter, or chosen through + // initialization of the layerstore through driver priority order for example. 
+ graphDriver := os.Getenv("DOCKER_DRIVER") + if isWindows { + graphDriver = "windowsfilter" + } else if graphDriver != "" { + logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", graphDriver) + } else { + graphDriver = config.GraphDriver + } + if d.UsesSnapshotter() { - d.graphDriver = d.configStore.GraphDriver - d.imageService = ctrd.NewService(d.containerdCli, d.containers, d.graphDriver) + snapshotter := ctrd.SnapshotterFromGraphDriver(graphDriver) + // Configure and validate the kernels security support. Note this is a Linux/FreeBSD + // operation only, so it is safe to pass *just* the runtime OS graphdriver. + if err := configureKernelSecuritySupport(config, snapshotter); err != nil { + return nil, err + } + d.imageService = ctrd.NewService(d.containerdCli, d.containers, snapshotter) + d.graphDriver = snapshotter } else { + layerStore, err := layer.NewStoreFromOptions(layer.StoreOptions{ + Root: config.Root, + MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), + GraphDriver: graphDriver, + GraphDriverOptions: config.GraphOptions, + IDMapping: idMapping, + PluginGetter: d.PluginStore, + ExperimentalEnabled: config.Experimental, + }) + if err != nil { + return nil, err + } + + // As layerstore initialization may set the driver + d.graphDriver = layerStore.DriverName() + + // Configure and validate the kernels security support. Note this is a Linux/FreeBSD + // operation only, so it is safe to pass *just* the runtime OS graphdriver. + if err := configureKernelSecuritySupport(config, d.graphDriver); err != nil { + return nil, err + } + + imageRoot := filepath.Join(config.Root, "image", d.graphDriver) ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) if err != nil { return nil, err } + // We have a single tag/reference store for the daemon globally. However, it's + // stored under the graphdriver. 
On host platforms which only support a single + // container OS, but multiple selectable graphdrivers, this means depending on which + // graphdriver is chosen, the global reference store is under there. For + // platforms which support multiple container operating systems, this is slightly + // more problematic as where does the global ref store get located? Fortunately, + // for Windows, which is currently the only daemon supporting multiple container + // operating systems, the list of graphdrivers available isn't user configurable. + // For backwards compatibility, we just put it under the windowsfilter + // directory regardless. + refStoreLocation := filepath.Join(imageRoot, `repositories.json`) + rs, err := refstore.NewReferenceStore(refStoreLocation) + if err != nil { + return nil, fmt.Errorf("Couldn't create reference store repository: %s", err) + } + d.ReferenceStore = rs + imageStore, err := image.NewImageStore(ifs, layerStore) if err != nil { return nil, err diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go index e33bc8383d91d..b3c126353d667 100644 --- a/daemon/daemon_unix.go +++ b/daemon/daemon_unix.go @@ -820,7 +820,7 @@ func configureKernelSecuritySupport(config *config.Config, driverName string) er return nil } - if driverName == "overlay" || driverName == "overlay2" { + if driverName == "overlay" || driverName == "overlay2" || driverName == "overlayfs" { // If driver is overlay or overlay2, make sure kernel // supports selinux with overlay. 
supported, err := overlaySupportsSelinux() From ca9d2bbedba65b26331f61c13d3ca9b6875453dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Thu, 4 Aug 2022 11:06:45 +0200 Subject: [PATCH 31/90] c8d/pull: Add options for stargz/nydus snapshotters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/image_pull.go | 21 ++ vendor.mod | 5 +- vendor.sum | 5 +- .../containerd/nydus-snapshotter/LICENSE | 202 ++++++++++++++++++ .../nydus-snapshotter/pkg/label/handler.go | 59 +++++ .../nydus-snapshotter/pkg/label/label.go | 41 ++++ .../stargz-snapshotter/estargz/build.go | 44 +++- .../stargz-snapshotter/estargz/estargz.go | 5 +- .../stargz-snapshotter/estargz/testutil.go | 5 +- .../stargz-snapshotter/fs/config/config.go | 94 ++++++++ .../stargz-snapshotter/fs/source/source.go | 202 ++++++++++++++++++ vendor/modules.txt | 7 +- 12 files changed, 675 insertions(+), 15 deletions(-) create mode 100644 vendor/github.com/containerd/nydus-snapshotter/LICENSE create mode 100644 vendor/github.com/containerd/nydus-snapshotter/pkg/label/handler.go create mode 100644 vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/fs/config/config.go create mode 100644 vendor/github.com/containerd/stargz-snapshotter/fs/source/source.go diff --git a/daemon/containerd/image_pull.go b/daemon/containerd/image_pull.go index dff6bd0a29b0b..d754b862bf874 100644 --- a/daemon/containerd/image_pull.go +++ b/daemon/containerd/image_pull.go @@ -7,6 +7,8 @@ import ( "github.com/containerd/containerd" "github.com/containerd/containerd/images" "github.com/containerd/containerd/platforms" + nyduslabel "github.com/containerd/nydus-snapshotter/pkg/label" + stargzsource "github.com/containerd/stargz-snapshotter/fs/source" "github.com/docker/distribution" "github.com/docker/distribution/reference" 
"github.com/docker/docker/api/types/registry" @@ -53,6 +55,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, return nil, nil }) opts = append(opts, containerd.WithImageHandler(h)) + opts = i.applySnapshotterOpts(opts, ref) finishProgress := showProgress(ctx, jobs, outStream, pullProgress(i.client.ContentStore())) defer finishProgress() @@ -79,3 +82,21 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, func (i *ImageService) GetRepository(ctx context.Context, ref reference.Named, authConfig *registry.AuthConfig) (distribution.Repository, error) { panic("not implemented") } + +func (i *ImageService) applySnapshotterOpts(opts []containerd.RemoteOpt, ref reference.Named) []containerd.RemoteOpt { + opts = append(opts, containerd.WithPullUnpack) + opts = append(opts, containerd.WithPullSnapshotter(i.snapshotter)) + + var wrapper func(images.Handler) images.Handler + switch i.snapshotter { + case "stargz": + const prefetch int64 = 10 * 1024 * 1024 // 10MiB + wrapper = stargzsource.AppendDefaultLabelsHandlerWrapper(ref.String(), prefetch) + case "nydus": + wrapper = nyduslabel.AppendLabelsHandlerWrapper(ref.String()) + } + if wrapper != nil { + opts = append(opts, containerd.WithImageHandlerWrapper(wrapper)) + } + return opts +} diff --git a/vendor.mod b/vendor.mod index 8dd540ada5701..8e4b4fd0465f2 100644 --- a/vendor.mod +++ b/vendor.mod @@ -23,6 +23,8 @@ require ( github.com/containerd/containerd v1.6.6 github.com/containerd/continuity v0.3.0 github.com/containerd/fifo v1.0.0 + github.com/containerd/nydus-snapshotter v0.3.0-alpha.5 + github.com/containerd/stargz-snapshotter v0.11.3 github.com/containerd/typeurl v1.0.2 github.com/coreos/go-systemd/v22 v22.3.2 github.com/creack/pty v1.1.11 @@ -100,8 +102,7 @@ require ( github.com/containerd/console v1.0.3 // indirect github.com/containerd/go-cni v1.1.6 // indirect github.com/containerd/go-runc v1.0.0 // indirect - github.com/containerd/stargz-snapshotter 
v0.11.3 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.11.3 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.11.4 // indirect github.com/containerd/ttrpc v1.1.0 // indirect github.com/containernetworking/cni v1.1.1 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect diff --git a/vendor.sum b/vendor.sum index a0e38ba6ba9ca..1bee329ccd320 100644 --- a/vendor.sum +++ b/vendor.sum @@ -280,11 +280,14 @@ github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nydus-snapshotter v0.3.0-alpha.5 h1:r6x2KjHqlcTiLUEZi8C5S3irTGpqj+1Y/9wdOHsLaVM= +github.com/containerd/nydus-snapshotter v0.3.0-alpha.5/go.mod h1:zBuYaCIt5l9DBvX89EQBrbp7K8wd2tvpR51KTxodtF8= github.com/containerd/stargz-snapshotter v0.11.3 h1:D3PoF563XmOBdtfx2G6AkhbHueqwIVPBFn2mrsWLa3w= github.com/containerd/stargz-snapshotter v0.11.3/go.mod h1:2j2EAUyvrLU4D9unYlTIwGhDKQIk74KJ9E71lJsQCVM= github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= -github.com/containerd/stargz-snapshotter/estargz v0.11.3 h1:k2kN16Px6LYuv++qFqK+JTcYqc8bEVxzGpf8/gFBL5M= github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0= +github.com/containerd/stargz-snapshotter/estargz v0.11.4 h1:LjrYUZpyOhiSaU7hHrdR82/RBoxfGWSaC0VeSSMXqnk= +github.com/containerd/stargz-snapshotter/estargz v0.11.4/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0= github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc 
v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= diff --git a/vendor/github.com/containerd/nydus-snapshotter/LICENSE b/vendor/github.com/containerd/nydus-snapshotter/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/label/handler.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/label/handler.go new file mode 100644 index 0000000000000..6e407658be505 --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/label/handler.go @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2022. Ant Group. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package label + +import ( + "context" + "fmt" + "strings" + + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// AppendLabelsHandlerWrapper returns a function which can wrap a handler by appending +// image's basic information to each layer descriptor as annotations during unpack. +// These annotations will be passed to this nydus snapshotter as labels. 
+func AppendLabelsHandlerWrapper(ref string) func(f images.Handler) images.Handler { + return func(f images.Handler) images.Handler { + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f.Handle(ctx, desc) + if err != nil { + return nil, err + } + switch desc.MediaType { + case ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest: + for i := range children { + c := &children[i] + if images.IsLayerType(c.MediaType) { + if c.Annotations == nil { + c.Annotations = make(map[string]string) + } + var layers string + for _, l := range children[i:] { + if images.IsLayerType(l.MediaType) { + ls := fmt.Sprintf("%s,", l.Digest.String()) + // This avoids the label hits the size limitation. + // Skipping layers is allowed here and only affects performance. + if err := labels.Validate(CRIImageLayers, layers+ls); err != nil { + break + } + layers += ls + } + } + c.Annotations[CRIImageLayers] = strings.TrimSuffix(layers, ",") + c.Annotations[CRIImageRef] = ref + c.Annotations[CRILayerDigest] = c.Digest.String() + c.Annotations[CRIManifestDigest] = desc.Digest.String() + } + } + } + return children, nil + }) + } +} diff --git a/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go b/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go new file mode 100644 index 0000000000000..b1dcdf5c5c1fe --- /dev/null +++ b/vendor/github.com/containerd/nydus-snapshotter/pkg/label/label.go @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2020. Ant Group. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +package label + +const ( + CRIImageRef = "containerd.io/snapshot/cri.image-ref" + CRIImageLayers = "containerd.io/snapshot/cri.image-layers" + CRILayerDigest = "containerd.io/snapshot/cri.layer-digest" + CRIManifestDigest = "containerd.io/snapshot/cri.manifest-digest" + + // Marker for remote snapshotter to handle the pull request. 
+ // During image pull, the containerd client calls Prepare API with the label containerd.io/snapshot.ref. + // This is a containerd-defined label which contains ChainID that targets a committed snapshot that the + // client is trying to prepare. + TargetSnapshotRef = "containerd.io/snapshot.ref" + + // Annotation containing ids of data blobs referenced by the image, set by image builders. + NydusDataBlobIDs = "containerd.io/snapshot/nydus-blob-ids" + // A bool flag to mark the blob as a Nydus data blob, set by image builders. + NydusDataLayer = "containerd.io/snapshot/nydus-blob" + // A bool flag to mark the blob as a nydus bootstrap, set by image builders. + NydusMetaLayer = "containerd.io/snapshot/nydus-bootstrap" + // Annotation containing secret to pull images from registry, set by the snapshotter. + NydusImagePullSecret = "containerd.io/snapshot/pullsecret" + // Annotation containing username to pull images from registry, set by the snapshotter. + NydusImagePullUsername = "containerd.io/snapshot/pullusername" + // A bool flag to enable integrity verification of meta data blob + NydusSignature = "containerd.io/snapshot/nydus-signature" + + // A bool flag to mark the blob as a estargz data blob, set by the snapshotter. + StargzLayer = "containerd.io/snapshot/stargz" + + // volatileOpt is a key of an optional lablel to each snapshot. 
+ // If this optional label of a snapshot is specified, when mounted to rootdir + // this snapshot will include volatile option + OverlayfsVolatileOpt = "containerd.io/snapshot/overlay.volatile" +) diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 9ee97fc911053..0da3efe4c21e5 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -26,10 +26,10 @@ import ( "archive/tar" "bytes" "compress/gzip" + "context" "errors" "fmt" "io" - "io/ioutil" "os" "path" "runtime" @@ -48,6 +48,7 @@ type options struct { prioritizedFiles []string missedPrioritizedFiles *[]string compression Compression + ctx context.Context } type Option func(o *options) error @@ -104,6 +105,14 @@ func WithCompression(compression Compression) Option { } } +// WithContext specifies a context that can be used for clean canceleration. +func WithContext(ctx context.Context) Option { + return func(o *options) error { + o.ctx = ctx + return nil + } +} + // Blob is an eStargz blob. 
type Blob struct { io.ReadCloser @@ -139,12 +148,29 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) } layerFiles := newTempFiles() + ctx := opts.ctx + if ctx == nil { + ctx = context.Background() + } + done := make(chan struct{}) + defer close(done) + go func() { + select { + case <-done: + // nop + case <-ctx.Done(): + layerFiles.CleanupAll() + } + }() defer func() { if rErr != nil { if err := layerFiles.CleanupAll(); err != nil { rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) } } + if cErr := ctx.Err(); cErr != nil { + rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) + } }() tarBlob, err := decompressBlob(tarBlob, layerFiles) if err != nil { @@ -506,12 +532,13 @@ func newTempFiles() *tempFiles { } type tempFiles struct { - files []*os.File - filesMu sync.Mutex + files []*os.File + filesMu sync.Mutex + cleanupOnce sync.Once } func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { - f, err := ioutil.TempFile(dir, pattern) + f, err := os.CreateTemp(dir, pattern) if err != nil { return nil, err } @@ -521,7 +548,14 @@ func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { return f, nil } -func (tf *tempFiles) CleanupAll() error { +func (tf *tempFiles) CleanupAll() (err error) { + tf.cleanupOnce.Do(func() { + err = tf.cleanupAll() + }) + return +} + +func (tf *tempFiles) cleanupAll() error { tf.filesMu.Lock() defer tf.filesMu.Unlock() var allErr []error diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index 4b655c14532fc..921e59ec6efc8 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -31,7 +31,6 @@ import ( "fmt" "hash" "io" - "io/ioutil" "os" "path" "sort" @@ -579,7 +578,7 @@ func (fr *fileReader) 
ReadAt(p []byte, off int64) (n int, err error) { return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) } defer dr.Close() - if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil { + if n, err := io.CopyN(io.Discard, dr, off); n != off || err != nil { return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) } return io.ReadFull(dr, p) @@ -933,7 +932,7 @@ func (w *Writer) appendTar(r io.Reader, lossless bool) error { } } } - remainDest := ioutil.Discard + remainDest := io.Discard if lossless { remainDest = dst // Preserve the remaining bytes in lossless mode } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index 1de13a4705be7..8f27dfb3ea27e 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -31,7 +31,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "os" "reflect" "sort" @@ -287,11 +286,11 @@ func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { return false } - aFile, err := ioutil.ReadAll(aTar) + aFile, err := io.ReadAll(aTar) if err != nil { t.Fatal("failed to read tar payload of A") } - bFile, err := ioutil.ReadAll(bTar) + bFile, err := io.ReadAll(bTar) if err != nil { t.Fatal("failed to read tar payload of B") } diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/config/config.go b/vendor/github.com/containerd/stargz-snapshotter/fs/config/config.go new file mode 100644 index 0000000000000..5550c2fd5e2d5 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/config/config.go @@ -0,0 +1,94 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the NOTICE.md file. +*/ + +package config + +const ( + // TargetSkipVerifyLabel is a snapshot label key that indicates to skip content + // verification for the layer. + TargetSkipVerifyLabel = "containerd.io/snapshot/remote/stargz.skipverify" + + // TargetPrefetchSizeLabel is a snapshot label key that indicates size to prefetch + // the layer. If the layer is eStargz and contains prefetch landmarks, these config + // will be respeced. + TargetPrefetchSizeLabel = "containerd.io/snapshot/remote/stargz.prefetch" +) + +type Config struct { + HTTPCacheType string `toml:"http_cache_type"` + FSCacheType string `toml:"filesystem_cache_type"` + // ResolveResultEntryTTLSec is TTL (in sec) to cache resolved layers for + // future use. (default 120s) + ResolveResultEntryTTLSec int `toml:"resolve_result_entry_ttl_sec"` + ResolveResultEntry int `toml:"resolve_result_entry"` // deprecated + PrefetchSize int64 `toml:"prefetch_size"` + PrefetchTimeoutSec int64 `toml:"prefetch_timeout_sec"` + NoPrefetch bool `toml:"noprefetch"` + NoBackgroundFetch bool `toml:"no_background_fetch"` + Debug bool `toml:"debug"` + AllowNoVerification bool `toml:"allow_no_verification"` + DisableVerification bool `toml:"disable_verification"` + MaxConcurrency int64 `toml:"max_concurrency"` + NoPrometheus bool `toml:"no_prometheus"` + + // BlobConfig is config for layer blob management. 
+ BlobConfig `toml:"blob"` + + // DirectoryCacheConfig is config for directory-based cache. + DirectoryCacheConfig `toml:"directory_cache"` + + FuseConfig `toml:"fuse"` +} + +type BlobConfig struct { + ValidInterval int64 `toml:"valid_interval"` + CheckAlways bool `toml:"check_always"` + // ChunkSize is the granularity at which background fetch and on-demand reads + // are fetched from the remote registry. + ChunkSize int64 `toml:"chunk_size"` + FetchTimeoutSec int64 `toml:"fetching_timeout_sec"` + ForceSingleRangeMode bool `toml:"force_single_range_mode"` + // PrefetchChunkSize is the maximum bytes transferred per http GET from remote registry + // during prefetch. It is recommended to have PrefetchChunkSize > ChunkSize. + // If PrefetchChunkSize < ChunkSize prefetch bytes will be fetched as a single http GET, + // else total GET requests for prefetch = ceil(PrefetchSize / PrefetchChunkSize). + PrefetchChunkSize int64 `toml:"prefetch_chunk_size"` + + MaxRetries int `toml:"max_retries"` + MinWaitMSec int `toml:"min_wait_msec"` + MaxWaitMSec int `toml:"max_wait_msec"` +} + +type DirectoryCacheConfig struct { + MaxLRUCacheEntry int `toml:"max_lru_cache_entry"` + MaxCacheFds int `toml:"max_cache_fds"` + SyncAdd bool `toml:"sync_add"` + Direct bool `toml:"direct" default:"true"` +} + +type FuseConfig struct { + // AttrTimeout defines overall timeout attribute for a file system in seconds. + AttrTimeout int64 `toml:"attr_timeout"` + + // EntryTimeout defines TTL for directory, name lookup in seconds. + EntryTimeout int64 `toml:"entry_timeout"` +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/fs/source/source.go b/vendor/github.com/containerd/stargz-snapshotter/fs/source/source.go new file mode 100644 index 0000000000000..17b20ecde0205 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/fs/source/source.go @@ -0,0 +1,202 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package source + +import ( + "context" + "fmt" + "strings" + + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/reference" + "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/stargz-snapshotter/fs/config" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// GetSources is a function for converting snapshot labels into typed blob sources +// information. This package defines a default converter which provides source +// information based on some labels but implementations aren't required to use labels. +// Implementations are allowed to return several sources (registry config + image refs) +// about the blob. +type GetSources func(labels map[string]string) (source []Source, err error) + +// RegistryHosts returns a list of registries that provides the specified image. +type RegistryHosts func(reference.Spec) ([]docker.RegistryHost, error) + +// Source is a typed blob source information. This contains information about +// a blob stored in registries and some contexts of the blob. +type Source struct { + + // Hosts is a registry configuration where this blob is stored. + Hosts RegistryHosts + + // Name is an image reference which contains this blob. + Name reference.Spec + + // Target is a descriptor of this blob. 
+ Target ocispec.Descriptor + + // Manifest is an image manifest which contains the blob. This will + // be used by the filesystem to pre-resolve some layers contained in + // the manifest. + // Currently, only layer digests (Manifest.Layers.Digest) will be used. + Manifest ocispec.Manifest +} + +const ( + // targetRefLabel is a label which contains image reference. + targetRefLabel = "containerd.io/snapshot/remote/stargz.reference" + + // targetDigestLabel is a label which contains layer digest. + targetDigestLabel = "containerd.io/snapshot/remote/stargz.digest" + + // targetImageLayersLabel is a label which contains layer digests contained in + // the target image. + targetImageLayersLabel = "containerd.io/snapshot/remote/stargz.layers" + + // targetImageURLsLabelPrefix is a label prefix which constructs a map from the layer index to + // urls of the layer descriptor. + targetImageURLsLabelPrefix = "containerd.io/snapshot/remote/urls." + + // targetURsLLabel is a label which contains layer URL. This is only used to pass URL from containerd + // to snapshotter. + targetURLsLabel = "containerd.io/snapshot/remote/urls" +) + +// FromDefaultLabels returns a function for converting snapshot labels to +// source information based on labels. 
+func FromDefaultLabels(hosts RegistryHosts) GetSources { + return func(labels map[string]string) ([]Source, error) { + refStr, ok := labels[targetRefLabel] + if !ok { + return nil, fmt.Errorf("reference hasn't been passed") + } + refspec, err := reference.Parse(refStr) + if err != nil { + return nil, err + } + + digestStr, ok := labels[targetDigestLabel] + if !ok { + return nil, fmt.Errorf("digest hasn't been passed") + } + target, err := digest.Parse(digestStr) + if err != nil { + return nil, err + } + + var neighboringLayers []ocispec.Descriptor + if l, ok := labels[targetImageLayersLabel]; ok { + layersStr := strings.Split(l, ",") + for i, l := range layersStr { + d, err := digest.Parse(l) + if err != nil { + return nil, err + } + if d.String() != target.String() { + desc := ocispec.Descriptor{Digest: d} + if urls, ok := labels[targetImageURLsLabelPrefix+fmt.Sprintf("%d", i)]; ok { + desc.URLs = strings.Split(urls, ",") + } + neighboringLayers = append(neighboringLayers, desc) + } + } + } + + targetDesc := ocispec.Descriptor{ + Digest: target, + Annotations: labels, + } + if targetURLs, ok := labels[targetURLsLabel]; ok { + targetDesc.URLs = append(targetDesc.URLs, strings.Split(targetURLs, ",")...) + } + + return []Source{ + { + Hosts: hosts, + Name: refspec, + Target: targetDesc, + Manifest: ocispec.Manifest{Layers: append([]ocispec.Descriptor{targetDesc}, neighboringLayers...)}, + }, + }, nil + } +} + +// AppendDefaultLabelsHandlerWrapper makes a handler which appends image's basic +// information to each layer descriptor as annotations during unpack. These +// annotations will be passed to this remote snapshotter as labels and used to +// construct source information. 
+func AppendDefaultLabelsHandlerWrapper(ref string, prefetchSize int64) func(f images.Handler) images.Handler { + return func(f images.Handler) images.Handler { + return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f.Handle(ctx, desc) + if err != nil { + return nil, err + } + switch desc.MediaType { + case ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest: + for i := range children { + c := &children[i] + if images.IsLayerType(c.MediaType) { + if c.Annotations == nil { + c.Annotations = make(map[string]string) + } + c.Annotations[targetRefLabel] = ref + c.Annotations[targetDigestLabel] = c.Digest.String() + var layers string + for i, l := range children[i:] { + if images.IsLayerType(l.MediaType) { + ls := fmt.Sprintf("%s,", l.Digest.String()) + // This avoids the label hits the size limitation. + // Skipping layers is allowed here and only affects performance. + if err := labels.Validate(targetImageLayersLabel, layers+ls); err != nil { + break + } + layers += ls + + // Store URLs of the neighbouring layer as well. 
+ urlsKey := targetImageURLsLabelPrefix + fmt.Sprintf("%d", i) + c.Annotations[urlsKey] = appendWithValidation(urlsKey, l.URLs) + } + } + c.Annotations[targetImageLayersLabel] = strings.TrimSuffix(layers, ",") + c.Annotations[config.TargetPrefetchSizeLabel] = fmt.Sprintf("%d", prefetchSize) + + // store URL in annotation to let containerd to pass it to the snapshotter + c.Annotations[targetURLsLabel] = appendWithValidation(targetURLsLabel, c.URLs) + } + } + } + return children, nil + }) + } +} + +func appendWithValidation(key string, values []string) string { + var v string + for _, u := range values { + s := fmt.Sprintf("%s,", u) + if err := labels.Validate(key, v+s); err != nil { + break + } + v += s + } + return strings.TrimSuffix(v, ",") +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 542dbc249c834..ed51088e0f837 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -251,10 +251,15 @@ github.com/containerd/go-cni # github.com/containerd/go-runc v1.0.0 ## explicit; go 1.13 github.com/containerd/go-runc +# github.com/containerd/nydus-snapshotter v0.3.0-alpha.5 +## explicit; go 1.17 +github.com/containerd/nydus-snapshotter/pkg/label # github.com/containerd/stargz-snapshotter v0.11.3 ## explicit; go 1.16 +github.com/containerd/stargz-snapshotter/fs/config +github.com/containerd/stargz-snapshotter/fs/source github.com/containerd/stargz-snapshotter/snapshot/overlayutils -# github.com/containerd/stargz-snapshotter/estargz v0.11.3 +# github.com/containerd/stargz-snapshotter/estargz v0.11.4 ## explicit; go 1.16 github.com/containerd/stargz-snapshotter/estargz github.com/containerd/stargz-snapshotter/estargz/errorutil From 7d6639744dd465ab384ea85cbd133b804cc83830 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Tue, 9 Aug 2022 14:28:29 +0200 Subject: [PATCH 32/90] let buildx know we support containerd snapshotter Signed-off-by: Nicolas De Loof --- builder/builder-next/controller.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git 
a/builder/builder-next/controller.go b/builder/builder-next/controller.go index 108adc9a32993..227ba4ea069a3 100644 --- a/builder/builder-next/controller.go +++ b/builder/builder-next/controller.go @@ -78,7 +78,9 @@ func newSnapshotterController(rt http.RoundTripper, opt Opt) (*mobycontrol.Contr snapshotter := ctd.DefaultSnapshotter wo, err := containerd.NewWorkerOpt(opt.Root, opt.ContainerdAddress, snapshotter, opt.ContainerdNamespace, - opt.Rootless, map[string]string{}, dns, nc, opt.ApparmorProfile, nil, "", ctd.WithTimeout(60*time.Second)) + opt.Rootless, map[string]string{ + worker.LabelSnapshotter: snapshotter, + }, dns, nc, opt.ApparmorProfile, nil, "", ctd.WithTimeout(60*time.Second)) if err != nil { return nil, err } From d63a86b4b46cc9c378f5f60d157c0be9f9a6539f Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Wed, 10 Aug 2022 11:44:01 +0200 Subject: [PATCH 33/90] Don't try to restore containers on restart with contaienrd This needs more work to make containerd the source of truth when it's used. It's safer for now to not try and restore any containers. Signed-off-by: Djordje Lukic --- daemon/daemon.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/daemon/daemon.go b/daemon/daemon.go index e59be1edd5b5a..e1c2e50f0f2bc 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -204,6 +204,11 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts { } func (daemon *Daemon) restore(ctx context.Context) error { + // Restoring containers after a restart is not yet supported + // when using the containerd content store. 
+ if daemon.UsesSnapshotter() { + return nil + } var mapLock sync.Mutex containers := make(map[string]*container.Container) From ccb236dd36bb5a334daa4a77c5bf8ee3b0302889 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Wed, 10 Aug 2022 12:36:47 +0200 Subject: [PATCH 34/90] Pass the current snapshotter to the buildkit worker If buildkit uses a different snapshotter we can't list the images any more because we can't find the snapshot. Signed-off-by: Djordje Lukic --- builder/builder-next/builder.go | 1 + builder/builder-next/controller.go | 6 ++---- cmd/dockerd/daemon.go | 1 + 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/builder/builder-next/builder.go b/builder/builder-next/builder.go index 49796ecbc1b14..4b95a1ba0df84 100644 --- a/builder/builder-next/builder.go +++ b/builder/builder-next/builder.go @@ -78,6 +78,7 @@ type Opt struct { DNSConfig config.DNSConfig ApparmorProfile string UseSnapshotter bool + Snapshotter string ContainerdAddress string ContainerdNamespace string } diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go index 227ba4ea069a3..7c1490fc73fa8 100644 --- a/builder/builder-next/controller.go +++ b/builder/builder-next/controller.go @@ -75,11 +75,9 @@ func newSnapshotterController(rt http.RoundTripper, opt Opt) (*mobycontrol.Contr } dns := getDNSConfig(opt.DNSConfig) - snapshotter := ctd.DefaultSnapshotter - - wo, err := containerd.NewWorkerOpt(opt.Root, opt.ContainerdAddress, snapshotter, opt.ContainerdNamespace, + wo, err := containerd.NewWorkerOpt(opt.Root, opt.ContainerdAddress, opt.Snapshotter, opt.ContainerdNamespace, opt.Rootless, map[string]string{ - worker.LabelSnapshotter: snapshotter, + worker.LabelSnapshotter: opt.Snapshotter, }, dns, nc, opt.ApparmorProfile, nil, "", ctd.WithTimeout(60*time.Second)) if err != nil { return nil, err diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index 6c173f261246e..fff71847271d7 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go 
@@ -307,6 +307,7 @@ func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, e DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), UseSnapshotter: d.UsesSnapshotter(), + Snapshotter: d.ImageService().GraphDriverName(), ContainerdAddress: config.ContainerdAddr, ContainerdNamespace: config.ContainerdNamespace, }) From e89f6e6f29827f6276fd6eff59afe1332e8bef95 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Tue, 9 Aug 2022 09:40:35 +0200 Subject: [PATCH 35/90] Don't remove containerd's container after exit Keeping the containerd's container around while our container is active makes `docker start` possible. Signed-off-by: Djordje Lukic --- daemon/delete.go | 5 +++++ daemon/start.go | 46 +++++++++++++++++++++++++++++++++------------- 2 files changed, 38 insertions(+), 13 deletions(-) diff --git a/daemon/delete.go b/daemon/delete.go index db04705bef9ba..96e37b6dc7850 100644 --- a/daemon/delete.go +++ b/daemon/delete.go @@ -154,6 +154,11 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, config ty for _, name := range linkNames { daemon.releaseName(name) } + if daemon.UsesSnapshotter() { + if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil { + logrus.WithError(err).WithField("container", container.ID).Error("cleanup: failed to delete container from containerd") + } + } container.SetRemoved() stateCtr.del(container.ID) diff --git a/daemon/start.go b/daemon/start.go index 21385af074d6b..4ee14eafa1252 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -190,19 +190,37 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C newContainerOpts = append(newContainerOpts, containerd.WithImage(ctrdimg)) } - err = daemon.containerd.Create(ctx, container.ID, spec, shim, createOptions, newContainerOpts...) 
- if err != nil { - if errdefs.IsConflict(err) { - logrus.WithError(err).WithField("container", container.ID).Error("Container not cleaned up from containerd from previous run") - // best effort to clean up old container object - daemon.containerd.DeleteTask(context.Background(), container.ID) - if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil && !errdefs.IsNotFound(err) { - logrus.WithError(err).WithField("container", container.ID).Error("Error cleaning up stale containerd container object") - } - err = daemon.containerd.Create(ctx, container.ID, spec, shim, createOptions) + createContainer := true + if daemon.UsesSnapshotter() { + // When using the containerd snapshotters we want to reuse the existing containerd container + _, err := daemon.containerdCli.LoadContainer(ctx, container.ID) + if err == nil { + createContainer = false } + } + + if createContainer { + err = daemon.containerd.Create(ctx, container.ID, spec, shim, createOptions, newContainerOpts...) if err != nil { - return translateContainerdStartErr(container.Path, container.SetExitCode, err) + if errdefs.IsConflict(err) { + // If we are here and we are using the contaienrd snapshotters then it means + // someone created a container with the same ID as ours and the creation was + // interleaved between the check that the container exists and our creation. + // We can't continue in this case so return the error. 
+ if daemon.UsesSnapshotter() { + return err + } + logrus.WithError(err).WithField("container", container.ID).Error("Container not cleaned up from containerd from previous run") + // best effort to clean up old container object + daemon.containerd.DeleteTask(context.Background(), container.ID) + if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil && !errdefs.IsNotFound(err) { + logrus.WithError(err).WithField("container", container.ID).Error("Error cleaning up stale containerd container object") + } + err = daemon.containerd.Create(ctx, container.ID, spec, shim, createOptions) + } + if err != nil { + return translateContainerdStartErr(container.Path, container.SetExitCode, err) + } } } @@ -273,7 +291,9 @@ func (daemon *Daemon) Cleanup(container *container.Container) { container.CancelAttachContext() - if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil { - logrus.Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err) + if !daemon.UsesSnapshotter() { + if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil { + logrus.Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err) + } } } From 75ab0364742d881f1902727f158106d24dbd5d5a Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Wed, 10 Aug 2022 15:22:32 +0200 Subject: [PATCH 36/90] ContainerChanges is not implemented by snapshotter-based ImageService Signed-off-by: Nicolas De Loof --- daemon/changes.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/daemon/changes.go b/daemon/changes.go index 09f27b2161c1f..678df72ec8826 100644 --- a/daemon/changes.go +++ b/daemon/changes.go @@ -21,6 +21,9 @@ func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { container.Lock() defer container.Unlock() + if daemon.UsesSnapshotter() { + panic("not implemented") + } if container.RWLayer == nil { return nil, errors.New("RWLayer of container " + name + " 
is unexpectedly nil") } From 4b0389238fc061d46d099f441c6f5230409914d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Wed, 10 Aug 2022 16:19:48 +0200 Subject: [PATCH 37/90] daemon: Fix not initialized network controller MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/daemon.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index e1c2e50f0f2bc..6a0c4dbd7c622 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -8,6 +8,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( "context" "fmt" + "io/fs" "net" "net/url" "os" @@ -204,11 +205,6 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts { } func (daemon *Daemon) restore(ctx context.Context) error { - // Restoring containers after a restart is not yet supported - // when using the containerd content store. - if daemon.UsesSnapshotter() { - return nil - } var mapLock sync.Mutex containers := make(map[string]*container.Container) @@ -219,6 +215,12 @@ func (daemon *Daemon) restore(ctx context.Context) error { return err } + // Restoring containers after a restart is not yet supported + // when using the containerd content store. + if daemon.UsesSnapshotter() { + dir = []fs.DirEntry{} + } + // parallelLimit is the maximum number of parallel startup jobs that we // allow (this is the limited used for all startup semaphores). 
The multipler // (128) was chosen after some fairly significant benchmarking -- don't change From c68a3c267391512a5e43d667cecae582f6d82371 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Thu, 11 Aug 2022 14:55:00 +0200 Subject: [PATCH 38/90] GetImage to return image tags with details Signed-off-by: Nicolas De Loof --- api/server/router/image/image_routes.go | 3 +-- daemon/containerd/image.go | 7 +++++++ daemon/images/image.go | 4 ++++ image/image.go | 2 ++ 4 files changed, 14 insertions(+), 2 deletions(-) diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index 0ce48dee5cabb..cd6b2fe96a39a 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -207,10 +207,9 @@ func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter } func (s *imageRouter) toImageInspect(img *image.Image) (*types.ImageInspect, error) { - refs := s.referenceBackend.References(img.ID().Digest()) repoTags := []string{} repoDigests := []string{} - for _, ref := range refs { + for _, ref := range img.Details.References { switch ref.(type) { case reference.NamedTagged: repoTags = append(repoTags, reference.FamiliarString(ref)) diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index 37de1f5553c0a..7c1eb61049992 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -41,7 +41,14 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima if err != nil { return nil, err } + + name, err := reference.ParseNamed(ii.Name()) + if err != nil { + return nil, err + } + img.Details = &image.Details{ + References: []reference.Named{name}, Size: size, Metadata: nil, Driver: i.GraphDriverName(), diff --git a/daemon/images/image.go b/daemon/images/image.go index 3bb37a0f8a3c4..6bd2ac1e37acc 100644 --- a/daemon/images/image.go +++ b/daemon/images/image.go @@ -173,10 +173,14 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID 
string, options ima } lastUpdated, err := i.imageStore.GetLastUpdated(img.ID()) + + references := i.referenceStore.References(img.ID().Digest()) + if err != nil { return nil, err } img.Details = &image.Details{ + References: references, Size: size, Metadata: layerMetadata, Driver: i.layerStore.DriverName(), diff --git a/image/image.go b/image/image.go index 25179a1c8a693..0c6bf257f805a 100644 --- a/image/image.go +++ b/image/image.go @@ -9,6 +9,7 @@ import ( "strings" "time" + "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/container" "github.com/docker/docker/dockerversion" "github.com/docker/docker/layer" @@ -119,6 +120,7 @@ type Image struct { // Details provides additional image data type Details struct { + References []reference.Named Size int64 Metadata map[string]string Driver string From da0ac40d9a02140803bec2299bf32a6740ddbfd7 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Thu, 11 Aug 2022 15:26:36 +0200 Subject: [PATCH 39/90] list images matching digest to discover all tags Signed-off-by: Nicolas De Loof --- daemon/containerd/image.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index 7c1eb61049992..aade0e66db617 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -42,13 +42,18 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima return nil, err } - name, err := reference.ParseNamed(ii.Name()) - if err != nil { - return nil, err + tagged, err := i.client.ImageService().List(ctx, fmt.Sprintf("target.digest==%s", ii.Target().Digest.String())) + tags := make([]reference.Named, 0, len(tagged)) + for _, i := range tagged { + name, err := reference.ParseNamed(i.Name) + if err != nil { + return nil, err + } + tags = append(tags, name) } img.Details = &image.Details{ - References: []reference.Named{name}, + References: tags, Size: size, Metadata: nil, Driver: i.GraphDriverName(), 
From c8a429de244e61e75d8763f84a49bcd6cb433524 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Fri, 12 Aug 2022 10:52:12 +0200 Subject: [PATCH 40/90] prefer error over panic where possible Signed-off-by: Nicolas De Loof --- daemon/changes.go | 2 +- daemon/containerd/image_builder.go | 5 +++-- daemon/containerd/image_commit.go | 3 ++- daemon/containerd/image_history.go | 3 ++- daemon/containerd/image_import.go | 3 ++- daemon/containerd/image_pull.go | 3 ++- daemon/containerd/image_search.go | 3 ++- daemon/containerd/image_squash.go | 4 +++- daemon/containerd/service.go | 11 ++++++----- 9 files changed, 23 insertions(+), 14 deletions(-) diff --git a/daemon/changes.go b/daemon/changes.go index 678df72ec8826..3e54cb789c02a 100644 --- a/daemon/changes.go +++ b/daemon/changes.go @@ -22,7 +22,7 @@ func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { container.Lock() defer container.Unlock() if daemon.UsesSnapshotter() { - panic("not implemented") + return nil, errors.New("not implemented") } if container.RWLayer == nil { return nil, errors.New("RWLayer of container " + name + " is unexpectedly nil") diff --git a/daemon/containerd/image_builder.go b/daemon/containerd/image_builder.go index f57900b1c1ae8..c09bd2da15991 100644 --- a/daemon/containerd/image_builder.go +++ b/daemon/containerd/image_builder.go @@ -2,6 +2,7 @@ package containerd import ( "context" + "errors" "github.com/docker/docker/api/types/backend" "github.com/docker/docker/builder" @@ -11,12 +12,12 @@ import ( // reference or ID. Every call to GetImageAndReleasableLayer MUST call // releasableLayer.Release() to prevent leaking of layers. func (i *ImageService) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) { - panic("not implemented") + return nil, nil, errors.New("not implemented") } // CreateImage creates a new image by adding a config and ID to the image store. 
// This is similar to LoadImage() except that it receives JSON encoded bytes of // an image instead of a tar archive. func (i *ImageService) CreateImage(config []byte, parent string) (builder.Image, error) { - panic("not implemented") + return nil, errors.New("not implemented") } diff --git a/daemon/containerd/image_commit.go b/daemon/containerd/image_commit.go index 113e39f49646c..d3d479fbed2cb 100644 --- a/daemon/containerd/image_commit.go +++ b/daemon/containerd/image_commit.go @@ -6,6 +6,7 @@ import ( "crypto/rand" "encoding/base64" "encoding/json" + "errors" "fmt" "runtime" "time" @@ -313,5 +314,5 @@ func uniquePart() string { // // This is a temporary shim. Should be removed when builder stops using commit. func (i *ImageService) CommitBuildStep(ctx context.Context, c backend.CommitConfig) (image.ID, error) { - panic("not implemented") + return "", errors.New("not implemented") } diff --git a/daemon/containerd/image_history.go b/daemon/containerd/image_history.go index eef6c8ce391c0..d79333a75b929 100644 --- a/daemon/containerd/image_history.go +++ b/daemon/containerd/image_history.go @@ -2,6 +2,7 @@ package containerd import ( "context" + "errors" imagetype "github.com/docker/docker/api/types/image" ) @@ -9,5 +10,5 @@ import ( // ImageHistory returns a slice of ImageHistory structures for the specified // image name by walking the image lineage. func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*imagetype.HistoryResponseItem, error) { - panic("not implemented") + return nil, errors.New("not implemented") } diff --git a/daemon/containerd/image_import.go b/daemon/containerd/image_import.go index 9e28bfe5cf6a7..cae17b0137398 100644 --- a/daemon/containerd/image_import.go +++ b/daemon/containerd/image_import.go @@ -2,6 +2,7 @@ package containerd import ( "context" + "errors" "io" specs "github.com/opencontainers/image-spec/specs-go/v1" @@ -12,5 +13,5 @@ import ( // written to outStream. 
Repository and tag names can optionally be given in // the repo and tag arguments, respectively. func (i *ImageService) ImportImage(ctx context.Context, src string, repository string, platform *specs.Platform, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { - panic("not implemented") + return errors.New("not implemented") } diff --git a/daemon/containerd/image_pull.go b/daemon/containerd/image_pull.go index d754b862bf874..dcb9861383832 100644 --- a/daemon/containerd/image_pull.go +++ b/daemon/containerd/image_pull.go @@ -2,6 +2,7 @@ package containerd import ( "context" + "errors" "io" "github.com/containerd/containerd" @@ -80,7 +81,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, // GetRepository returns a repository from the registry. func (i *ImageService) GetRepository(ctx context.Context, ref reference.Named, authConfig *registry.AuthConfig) (distribution.Repository, error) { - panic("not implemented") + return nil, errors.New("not implemented") } func (i *ImageService) applySnapshotterOpts(opts []containerd.RemoteOpt, ref reference.Named) []containerd.RemoteOpt { diff --git a/daemon/containerd/image_search.go b/daemon/containerd/image_search.go index 5524fb990681a..bccb44417e23d 100644 --- a/daemon/containerd/image_search.go +++ b/daemon/containerd/image_search.go @@ -2,6 +2,7 @@ package containerd import ( "context" + "errors" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" @@ -13,5 +14,5 @@ import ( // TODO: this could be implemented in a registry service instead of the image // service. 
func (i *ImageService) SearchRegistryForImages(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) { - panic("not implemented") + return nil, errors.New("not implemented") } diff --git a/daemon/containerd/image_squash.go b/daemon/containerd/image_squash.go index 7fa19f692f6f8..4899a2bb5c25b 100644 --- a/daemon/containerd/image_squash.go +++ b/daemon/containerd/image_squash.go @@ -1,5 +1,7 @@ package containerd +import "errors" + // SquashImage creates a new image with the diff of the specified image and // the specified parent. This new image contains only the layers from its // parent + 1 extra layer which contains the diff of all the layers in between. @@ -7,5 +9,5 @@ package containerd // image with the diff of all the specified image's layers merged into a new // layer that has no parents. func (i *ImageService) SquashImage(id, parent string) (string, error) { - panic("not implemented") + return "", errors.New("not implemented") } diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index 563e9a00b29e8..65f22e9b3735b 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -2,6 +2,7 @@ package containerd import ( "context" + "errors" "fmt" "github.com/containerd/containerd" @@ -62,13 +63,13 @@ func (i *ImageService) Children(id image.ID) []image.ID { // called from create.go // TODO: accept an opt struct instead of container? func (i *ImageService) CreateLayer(container *container.Container, initFunc layer.MountInit) (layer.RWLayer, error) { - panic("not implemented") + return nil, errors.New("not implemented") } // GetLayerByID returns a layer by ID // called from daemon.go Daemon.restore(), and Daemon.containerExport(). 
func (i *ImageService) GetLayerByID(cid string) (layer.RWLayer, error) { - panic("not implemented") + return nil, errors.New("not implemented") } // LayerStoreStatus returns the status for each layer store @@ -81,7 +82,7 @@ func (i *ImageService) LayerStoreStatus() [][2]string { // called from daemon.go Daemon.Shutdown(), and Daemon.Cleanup() (cleanup is actually continerCleanup) // TODO: needs to be refactored to Unmount (see callers), or removed and replaced with GetLayerByID func (i *ImageService) GetLayerMountID(cid string) (string, error) { - panic("not implemented") + return "", errors.New("not implemented") } // Cleanup resources before the process is shutdown. @@ -101,7 +102,7 @@ func (i *ImageService) GraphDriverName() string { // ReleaseLayer releases a layer allowing it to be removed // called from delete.go Daemon.cleanupContainer(), and Daemon.containerExport() func (i *ImageService) ReleaseLayer(rwlayer layer.RWLayer) error { - panic("not implemented") + return errors.New("not implemented") } // LayerDiskUsage returns the number of bytes used by layer stores @@ -165,7 +166,7 @@ func (i *ImageService) UpdateConfig(maxDownloads, maxUploads int) { // GetLayerFolders returns the layer folders from an image RootFS. func (i *ImageService) GetLayerFolders(img *image.Image, rwLayer layer.RWLayer) ([]string, error) { - panic("not implemented") + return nil, errors.New("not implemented") } // GetContainerLayerSize returns the real size & virtual size of the container. 
From 300ddad5028409a7249e8f5af565fb1ee9674c03 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Fri, 5 Aug 2022 09:57:08 +0200 Subject: [PATCH 41/90] remove GetLayerByID from ImageService interface Signed-off-by: Nicolas De Loof --- api/server/router/container/backend.go | 2 +- .../router/container/container_routes.go | 2 +- daemon/containerd/image_exporter.go | 16 ++++++- daemon/containerd/service.go | 6 --- daemon/daemon.go | 17 +++++-- daemon/export.go | 48 ++++++------------- daemon/image_service.go | 3 +- daemon/images/image_exporter.go | 21 ++++++++ 8 files changed, 66 insertions(+), 49 deletions(-) diff --git a/api/server/router/container/backend.go b/api/server/router/container/backend.go index eea44d8b7a32f..0c53282b38302 100644 --- a/api/server/router/container/backend.go +++ b/api/server/router/container/backend.go @@ -25,7 +25,7 @@ type execBackend interface { type copyBackend interface { ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) ContainerCopy(name string, res string) (io.ReadCloser, error) - ContainerExport(name string, out io.Writer) error + ContainerExport(ctx context.Context, name string, out io.Writer) error ContainerExtractToDir(name, path string, copyUIDGID, noOverwriteDirNonDir bool, content io.Reader) error ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) } diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go index 5df051fc95f4a..6de22a451157a 100644 --- a/api/server/router/container/container_routes.go +++ b/api/server/router/container/container_routes.go @@ -170,7 +170,7 @@ func (s *containerRouter) getContainersLogs(ctx context.Context, w http.Response } func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return s.backend.ContainerExport(vars["name"], w) + return 
s.backend.ContainerExport(ctx, vars["name"], w) } type bodyOnStartError struct{} diff --git a/daemon/containerd/image_exporter.go b/daemon/containerd/image_exporter.go index 679f3b1c2ec24..658f12aed17bd 100644 --- a/daemon/containerd/image_exporter.go +++ b/daemon/containerd/image_exporter.go @@ -9,13 +9,27 @@ import ( containerdimages "github.com/containerd/containerd/images" "github.com/containerd/containerd/images/archive" "github.com/containerd/containerd/images/converter" + "github.com/containerd/containerd/mount" "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" - "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/containerfs" + v1 "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +func (i *ImageService) PerformWithBaseFS(ctx context.Context, c *container.Container, fn func(containerfs.ContainerFS) error) error { + snapshotter := i.client.SnapshotService(containerd.DefaultSnapshotter) + mounts, err := snapshotter.Mounts(ctx, c.ID) + if err != nil { + return err + } + return mount.WithTempMount(ctx, mounts, func(root string) error { + return fn(containerfs.NewLocalContainerFS(root)) + }) +} + // LoadImage uploads a set of images into the repository. This is the // complement of ExportImage. The input stream is an uncompressed tar // ball containing images and metadata. diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index 65f22e9b3735b..b19cc84b963af 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -66,12 +66,6 @@ func (i *ImageService) CreateLayer(container *container.Container, initFunc laye return nil, errors.New("not implemented") } -// GetLayerByID returns a layer by ID -// called from daemon.go Daemon.restore(), and Daemon.containerExport(). 
-func (i *ImageService) GetLayerByID(cid string) (layer.RWLayer, error) { - return nil, errors.New("not implemented") -} - // LayerStoreStatus returns the status for each layer store // called from info.go func (i *ImageService) LayerStoreStatus() [][2]string { diff --git a/daemon/daemon.go b/daemon/daemon.go index 6a0c4dbd7c622..ec6857a6835d0 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -204,6 +204,11 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts { return resolver.NewRegistryConfig(m) } +// layerAccessor may be implemented by ImageService +type layerAccessor interface { + GetLayerByID(cid string) (layer.RWLayer, error) +} + func (daemon *Daemon) restore(ctx context.Context) error { var mapLock sync.Mutex containers := make(map[string]*container.Container) @@ -252,12 +257,14 @@ func (daemon *Daemon) restore(ctx context.Context) error { } // Ignore the container if it does not support the current driver being used by the graph if (c.Driver == "" && daemon.graphDriver == "aufs") || c.Driver == daemon.graphDriver { - rwlayer, err := daemon.imageService.GetLayerByID(c.ID) - if err != nil { - log.WithError(err).Error("failed to load container mount") - return + if accessor, ok := daemon.imageService.(layerAccessor); ok { + rwlayer, err := accessor.GetLayerByID(c.ID) + if err != nil { + log.WithError(err).Error("failed to load container mount") + return + } + c.RWLayer = rwlayer } - c.RWLayer = rwlayer log.WithFields(logrus.Fields{ "running": c.IsRunning(), "paused": c.IsPaused(), diff --git a/daemon/export.go b/daemon/export.go index b248def224d1f..21ad4cc9e01ad 100644 --- a/daemon/export.go +++ b/daemon/export.go @@ -1,18 +1,19 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "fmt" "io" "github.com/docker/docker/container" "github.com/docker/docker/errdefs" "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/containerfs" ) // ContainerExport writes 
the contents of the container to the given // writer. An error is returned if the container cannot be found. -func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { +func (daemon *Daemon) ContainerExport(ctx context.Context, name string, out io.Writer) error { ctr, err := daemon.GetContainer(name) if err != nil { return err @@ -32,49 +33,28 @@ func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { return errdefs.Conflict(err) } - data, err := daemon.containerExport(ctr) + err = daemon.containerExport(ctx, ctr, out) if err != nil { return fmt.Errorf("Error exporting container %s: %v", name, err) } - defer data.Close() - // Stream the entire contents of the container (basically a volatile snapshot) - if _, err := io.Copy(out, data); err != nil { - return fmt.Errorf("Error exporting container %s: %v", name, err) - } return nil } -func (daemon *Daemon) containerExport(container *container.Container) (arch io.ReadCloser, err error) { - rwlayer, err := daemon.imageService.GetLayerByID(container.ID) - if err != nil { - return nil, err - } - defer func() { +func (daemon *Daemon) containerExport(ctx context.Context, container *container.Container, out io.Writer) error { + daemon.imageService.PerformWithBaseFS(ctx, container, func(basefs containerfs.ContainerFS) error { + archv, err := archivePath(basefs, basefs.Path(), &archive.TarOptions{ + Compression: archive.Uncompressed, + IDMap: daemon.idMapping, + }, basefs.Path()) if err != nil { - daemon.imageService.ReleaseLayer(rwlayer) + return err } - }() - - basefs, err := rwlayer.Mount(container.GetMountLabel()) - if err != nil { - return nil, err - } - archv, err := archivePath(basefs, basefs.Path(), &archive.TarOptions{ - Compression: archive.Uncompressed, - IDMap: daemon.idMapping, - }, basefs.Path()) - if err != nil { - rwlayer.Unmount() - return nil, err - } - arch = ioutils.NewReadCloserWrapper(archv, func() error { - err := archv.Close() - rwlayer.Unmount() - 
daemon.imageService.ReleaseLayer(rwlayer) + // Stream the entire contents of the container (basically a volatile snapshot) + _, err = io.Copy(out, archv) return err }) daemon.LogContainerEvent(container, "export") - return arch, err + return nil } diff --git a/daemon/image_service.go b/daemon/image_service.go index 83f382af3f139..0522d320266cd 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -16,6 +16,7 @@ import ( "github.com/docker/docker/daemon/images" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/containerfs" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -30,6 +31,7 @@ type ImageService interface { CreateImage(config []byte, parent string) (builder.Image, error) ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) ExportImage(ctx context.Context, names []string, outStream io.Writer) error + PerformWithBaseFS(ctx context.Context, c *container.Container, fn func(containerfs.ContainerFS) error) error LoadImage(ctx context.Context, inTar io.ReadCloser, outStream io.Writer, quiet bool) error Images(ctx context.Context, opts types.ImageListOptions) ([]*types.ImageSummary, error) CountImages() int @@ -47,7 +49,6 @@ type ImageService interface { GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ROLayer, error) CreateLayer(container *container.Container, initFunc layer.MountInit) (layer.RWLayer, error) - GetLayerByID(cid string) (layer.RWLayer, error) LayerStoreStatus() [][2]string GetLayerMountID(cid string) (string, error) ReleaseLayer(rwlayer layer.RWLayer) error diff --git a/daemon/images/image_exporter.go b/daemon/images/image_exporter.go index 1a5880ec3b84b..88470dbe527a2 100644 --- a/daemon/images/image_exporter.go +++ b/daemon/images/image_exporter.go @@ -4,7 +4,9 @@ import ( "context" "io" + "github.com/docker/docker/container" 
"github.com/docker/docker/image/tarexport" + "github.com/docker/docker/pkg/containerfs" ) // ExportImage exports a list of images to the given output stream. The @@ -17,6 +19,25 @@ func (i *ImageService) ExportImage(ctx context.Context, names []string, outStrea return imageExporter.Save(names, outStream) } +func (i *ImageService) PerformWithBaseFS(ctx context.Context, c *container.Container, fn func(containerfs.ContainerFS) error) error { + rwlayer, err := i.GetLayerByID(c.ID) + if err != nil { + return err + } + defer func() { + if err != nil { + i.ReleaseLayer(rwlayer) + } + }() + + basefs, err := rwlayer.Mount(c.GetMountLabel()) + if err != nil { + return err + } + + return fn(basefs) +} + // LoadImage uploads a set of images into the repository. This is the // complement of ExportImage. The input stream is an uncompressed tar // ball containing images and metadata. From 5f16ca5046f0ed0f3bb8b0a21457ff41b242a65f Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Fri, 12 Aug 2022 19:50:43 -0700 Subject: [PATCH 42/90] builder-next: reenable runc executor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, without special CNI config the builder would only create host network containers that is a security issue. Using runc directly instead of shim is faster as well as builder doesn’t need anything from shim. The overhead of setting up network sandbox is much slower of course. 
Signed-off-by: Tonis Tiigi --- builder/builder-next/controller.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go index 7c1490fc73fa8..0aba4d21f3c07 100644 --- a/builder/builder-next/controller.go +++ b/builder/builder-next/controller.go @@ -91,6 +91,12 @@ func newSnapshotterController(rt http.RoundTripper, opt Opt) (*mobycontrol.Contr wo.GCPolicy = policy wo.RegistryHosts = opt.RegistryHosts + exec, err := newExecutor(opt.Root, opt.DefaultCgroupParent, opt.NetworkController, dns, opt.Rootless, opt.IdentityMapping, opt.ApparmorProfile) + if err != nil { + return nil, err + } + wo.Executor = exec + w, err := mobyworker.NewContainerdWorker(context.TODO(), wo) if err != nil { return nil, err From e54c52c8e269bf9ba0d9390b74b881433235ec38 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Fri, 12 Aug 2022 20:03:23 -0700 Subject: [PATCH 43/90] builder-next: enable more cache backends Signed-off-by: Tonis Tiigi --- builder/builder-next/controller.go | 8 +- vendor.mod | 3 + vendor.sum | 509 ++++++++++++++ .../github.com/dimchansky/utfbom/.gitignore | 37 + .../github.com/dimchansky/utfbom/.travis.yml | 29 + vendor/github.com/dimchansky/utfbom/LICENSE | 201 ++++++ vendor/github.com/dimchansky/utfbom/README.md | 66 ++ vendor/github.com/dimchansky/utfbom/utfbom.go | 192 ++++++ .../github.com/golang-jwt/jwt/v4/.gitignore | 4 + vendor/github.com/golang-jwt/jwt/v4/LICENSE | 9 + .../golang-jwt/jwt/v4/MIGRATION_GUIDE.md | 22 + vendor/github.com/golang-jwt/jwt/v4/README.md | 114 ++++ .../golang-jwt/jwt/v4/VERSION_HISTORY.md | 135 ++++ vendor/github.com/golang-jwt/jwt/v4/claims.go | 267 ++++++++ vendor/github.com/golang-jwt/jwt/v4/doc.go | 4 + vendor/github.com/golang-jwt/jwt/v4/ecdsa.go | 142 ++++ .../golang-jwt/jwt/v4/ecdsa_utils.go | 69 ++ .../github.com/golang-jwt/jwt/v4/ed25519.go | 85 +++ .../golang-jwt/jwt/v4/ed25519_utils.go | 64 ++ vendor/github.com/golang-jwt/jwt/v4/errors.go | 59 ++ 
vendor/github.com/golang-jwt/jwt/v4/hmac.go | 95 +++ .../golang-jwt/jwt/v4/map_claims.go | 148 ++++ vendor/github.com/golang-jwt/jwt/v4/none.go | 52 ++ vendor/github.com/golang-jwt/jwt/v4/parser.go | 148 ++++ vendor/github.com/golang-jwt/jwt/v4/rsa.go | 101 +++ .../github.com/golang-jwt/jwt/v4/rsa_pss.go | 142 ++++ .../github.com/golang-jwt/jwt/v4/rsa_utils.go | 105 +++ .../golang-jwt/jwt/v4/signing_method.go | 35 + .../golang-jwt/jwt/v4/staticcheck.conf | 1 + vendor/github.com/golang-jwt/jwt/v4/token.go | 110 +++ vendor/github.com/golang-jwt/jwt/v4/types.go | 125 ++++ .../buildkit/cache/remotecache/gha/gha.go | 388 +++++++++++ .../tonistiigi/go-actions-cache/LICENSE | 21 + .../tonistiigi/go-actions-cache/api.md | 66 ++ .../tonistiigi/go-actions-cache/cache.go | 645 ++++++++++++++++++ .../tonistiigi/go-actions-cache/readerat.go | 89 +++ .../tonistiigi/go-actions-cache/retry.go | 108 +++ vendor/modules.txt | 10 + 38 files changed, 4407 insertions(+), 1 deletion(-) create mode 100644 vendor/github.com/dimchansky/utfbom/.gitignore create mode 100644 vendor/github.com/dimchansky/utfbom/.travis.yml create mode 100644 vendor/github.com/dimchansky/utfbom/LICENSE create mode 100644 vendor/github.com/dimchansky/utfbom/README.md create mode 100644 vendor/github.com/dimchansky/utfbom/utfbom.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/.gitignore create mode 100644 vendor/github.com/golang-jwt/jwt/v4/LICENSE create mode 100644 vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md create mode 100644 vendor/github.com/golang-jwt/jwt/v4/README.md create mode 100644 vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md create mode 100644 vendor/github.com/golang-jwt/jwt/v4/claims.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/doc.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ecdsa.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/ed25519.go create mode 100644 
vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/errors.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/hmac.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/map_claims.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/none.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/parser.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/rsa.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/signing_method.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf create mode 100644 vendor/github.com/golang-jwt/jwt/v4/token.go create mode 100644 vendor/github.com/golang-jwt/jwt/v4/types.go create mode 100644 vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go create mode 100644 vendor/github.com/tonistiigi/go-actions-cache/LICENSE create mode 100644 vendor/github.com/tonistiigi/go-actions-cache/api.md create mode 100644 vendor/github.com/tonistiigi/go-actions-cache/cache.go create mode 100644 vendor/github.com/tonistiigi/go-actions-cache/readerat.go create mode 100644 vendor/github.com/tonistiigi/go-actions-cache/retry.go diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go index 7c1490fc73fa8..461dbd8ad5689 100644 --- a/builder/builder-next/controller.go +++ b/builder/builder-next/controller.go @@ -26,8 +26,10 @@ import ( "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/cache/remotecache" + "github.com/moby/buildkit/cache/remotecache/gha" inlineremotecache "github.com/moby/buildkit/cache/remotecache/inline" localremotecache "github.com/moby/buildkit/cache/remotecache/local" + registryremotecache "github.com/moby/buildkit/cache/remotecache/registry" "github.com/moby/buildkit/client" "github.com/moby/buildkit/frontend" dockerfile 
"github.com/moby/buildkit/frontend/dockerfile/builder" @@ -120,9 +122,13 @@ func newSnapshotterController(rt http.RoundTripper, opt Opt) (*mobycontrol.Contr ResolveCacheImporterFuncs: map[string]remotecache.ResolveCacheImporterFunc{ "registry": localinlinecache.ResolveCacheImporterFunc(opt.SessionManager, opt.RegistryHosts, wa.ContentStore(), dist.ReferenceStore, dist.ImageStore), "local": localremotecache.ResolveCacheImporterFunc(opt.SessionManager), + "gha": gha.ResolveCacheImporterFunc(), }, ResolveCacheExporterFuncs: map[string]remotecache.ResolveCacheExporterFunc{ - "inline": inlineremotecache.ResolveCacheExporterFunc(), + "registry": registryremotecache.ResolveCacheExporterFunc(opt.SessionManager, opt.RegistryHosts), + "inline": inlineremotecache.ResolveCacheExporterFunc(), + "local": localremotecache.ResolveCacheExporterFunc(opt.SessionManager), + "gha": gha.ResolveCacheExporterFunc(), }, Entitlements: getEntitlements(opt.BuilderConfig), UseSnapshotter: true, diff --git a/vendor.mod b/vendor.mod index 8e4b4fd0465f2..dd2c1c647b301 100644 --- a/vendor.mod +++ b/vendor.mod @@ -106,6 +106,7 @@ require ( github.com/containerd/ttrpc v1.1.0 // indirect github.com/containernetworking/cni v1.1.1 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect + github.com/dimchansky/utfbom v1.1.1 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/felixge/httpsnoop v1.0.2 // indirect github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e // indirect @@ -114,6 +115,7 @@ require ( github.com/go-logr/stdr v1.2.2 // indirect github.com/gofrs/flock v0.8.1 // indirect github.com/gogo/googleapis v1.4.1 // indirect + github.com/golang-jwt/jwt/v4 v4.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/btree v1.0.1 // indirect @@ -140,6 +142,7 @@ require ( github.com/rexray/gocsi v1.2.2 // indirect github.com/sean-/seed 
v0.0.0-20170313163322-e2103e2c3529 // indirect github.com/tinylib/msgp v1.1.0 // indirect + github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.2 // indirect go.etcd.io/etcd/pkg/v3 v3.5.2 // indirect diff --git a/vendor.sum b/vendor.sum index 1bee329ccd320..5ed8c616d0dc2 100644 --- a/vendor.sum +++ b/vendor.sum @@ -1,13 +1,20 @@ bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= +cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod 
h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -48,33 +55,87 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= code.cloudfoundry.org/clock v1.0.0 h1:kFXWQM4bxYvdBw2X8BbBeXwQNgfoWv1vqAk2ZZyBN2o= code.cloudfoundry.org/clock v1.0.0/go.mod h1:QD9Lzhd/ux6eNQVUDVRJX/RKTigpewimNYBi7ivZKY8= +code.gitea.io/sdk/gitea v0.12.0/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= +contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= +contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= +contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= +contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= +contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= +github.com/AkihiroSuda/containerd-fuse-overlayfs v1.0.0/go.mod h1:0mMDvQFeLbbn1Wy8P2j3hwFhqBq+FKn8OZPno8WLmp8= +github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= +github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= 
+github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v19.1.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v42.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= +github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.1.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/Azure/go-autorest 
v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest v0.10.2/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= +github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= 
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc= +github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= +github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Djarvur/go-err113 v0.0.0-20200410182137-af658d038157/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= 
+github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo= +github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14= github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0 h1:cOjLyhBhe91glgZZNbQUg9BJC57l6BiSKov0Ivv7k0U= github.com/Graylog2/go-gelf v0.0.0-20191017102106-1550ee647df0/go.mod h1:fBaQWrftOD5CrVCUfoYGHs4X4VViTuGOXA8WloCjTY0= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.15-0.20200908182639-5b44b70ab3ab/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.15/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= @@ -87,6 +148,7 @@ github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXn github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/Microsoft/hcsshim 
v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.10/go.mod h1:g5uw8EV2mAlzqe94tfNBNdr89fnbD/n3HV0OhsddkmM= github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= @@ -96,11 +158,13 @@ github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01 github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I= github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc= +github.com/Microsoft/hcsshim/test v0.0.0-20200826032352-301c83a30e7c/go.mod h1:30A5igQ91GEmhYJF8TaRP79pMBOYynRsyOByfVV0dU4= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= @@ -108,12 +172,16 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko 
github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91 h1:vX+gnvBc56EbWYrmlhYbFYRaeikAke1GL84N4BEYOFE= github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91/go.mod h1:cDLGBht23g0XQdLjzn6xOGXDkLK182YfINAaZEQLCHQ= github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/akutz/gosync v0.1.0 h1:naxPT/aDYDh79PMwM3XmencmNQeYmpNFSZy4ZE9zIW0= github.com/akutz/gosync v0.1.0/go.mod h1:I8I4aiqJI1nqaeYOOB1WS+CgRJVVPqhct9Y4njywM84= github.com/akutz/memconn v0.1.0 h1:NawI0TORU4hcOMsMr11g7vwlCdkYeLKXBcxWu2W/P8A= github.com/akutz/memconn v0.1.0/go.mod h1:Jo8rI7m0NieZyLI5e2CDlRdRqRRB4S7Xp77ukDjH+Fw= +github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -121,7 +189,14 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= 
github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apex/log v1.1.4/go.mod h1:AlpoD9aScyQfJDVHmLMEcx4oU6LqzkWp4Mg9GdAcEvQ= +github.com/apex/log v1.3.0/go.mod h1:jd8Vpsr46WAe3EZSQ/IUMs2qQD/GOycT5rPWCO1yGcs= +github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 h1:7Ip0wMmLHLRJdrloDxZfhMm0xrLXZS8+COSu2bXmEQs= github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -132,8 +207,17 @@ github.com/armon/go-radix v0.0.0-20150105235045-e39d623f12e8 h1:XGHqlQXxwVly7mpc github.com/armon/go-radix v0.0.0-20150105235045-e39d623f12e8/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.15.90/go.mod h1:es1KtYUFs7le0xQ3rOihkuoVD90z7D0fR2Qm4S00/gU= +github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= 
+github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.31.6 h1:nKjQbpXhdImctBh1e0iLg9iQW/X297LPPuY/9f92R2k= github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -144,10 +228,17 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod 
h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bombsimon/wsl/v2 v2.0.0/go.mod h1:mf25kr/SqFEPhhcxW1+7pxzGlW+hIl/hYTKY95VwV8U= +github.com/bombsimon/wsl/v2 v2.2.0/go.mod h1:Azh8c3XGEJl9LyX0/sFC+CKMc7Ssgua0g+6abzXN4Pg= +github.com/bombsimon/wsl/v3 v3.0.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/bombsimon/wsl/v3 v3.1.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= +github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= github.com/bsphere/le_go v0.0.0-20170215134836-7a984a84b549 h1:QJJnIXZ34OUK5JfWlq1l3n0SfO9g1amiLFIcTECgpq0= github.com/bsphere/le_go v0.0.0-20170215134836-7a984a84b549/go.mod h1:313oBJKClgRD/+t59eUnrfG7/xHXZJd7v+SjCacDm4Q= @@ -156,8 +247,12 @@ github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx2 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw= +github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo= +github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto 
v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= @@ -197,6 +292,7 @@ github.com/cockroachdb/errors v1.2.4 h1:Lap807SXTH5tri2TivECb/4abUkMZC9zRoLarvcK github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f h1:o/kfcElHqOiXqcou5a3rIlMc7oJbMQkeLk0VQJ7zgqY= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/container-storage-interface/spec v1.2.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4= github.com/container-storage-interface/spec v1.5.0 h1:lvKxe3uLgqQeVQcrnL2CPQKISoKjTJxojEs9cBk+HXo= github.com/container-storage-interface/spec v1.5.0/go.mod h1:8K96oQNkJ7pFcC2R9Z1ynGGBB1I93kcS6PGg3SsOk8s= @@ -220,6 +316,7 @@ github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= @@ -230,6 +327,8 @@ 
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1-0.20201117152358-0edc412565dc/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= @@ -282,6 +381,7 @@ github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oM github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= github.com/containerd/nydus-snapshotter v0.3.0-alpha.5 h1:r6x2KjHqlcTiLUEZi8C5S3irTGpqj+1Y/9wdOHsLaVM= github.com/containerd/nydus-snapshotter v0.3.0-alpha.5/go.mod h1:zBuYaCIt5l9DBvX89EQBrbp7K8wd2tvpR51KTxodtF8= +github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4= github.com/containerd/stargz-snapshotter v0.11.3 h1:D3PoF563XmOBdtfx2G6AkhbHueqwIVPBFn2mrsWLa3w= github.com/containerd/stargz-snapshotter v0.11.3/go.mod h1:2j2EAUyvrLU4D9unYlTIwGhDKQIk74KJ9E71lJsQCVM= github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= @@ -321,6 +421,7 @@ github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd 
v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q= @@ -329,13 +430,16 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod 
h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= @@ -351,24 +455,37 @@ github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= +github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deckarep/golang-set v0.0.0-20141123011944-ef32fa3046d9 h1:YpTz1+8tEHbybtxtMJNkV3U3GBAA05EakMRTR3dXkis= github.com/deckarep/golang-set v0.0.0-20141123011944-ef32fa3046d9/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dimchansky/utfbom v1.1.0/go.mod 
h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= +github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20190925022749-754388324470/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.0-beta1.0.20201029214301-1d20b15adc38+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.13+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v0.0.0-20200511152416-a93e9eb0e95c/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker 
v17.12.0-ce-rc1.0.20200730172259-9f28837c1d93+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.0-beta1.0.20201110211921-af34b94a78a1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.3-0.20211208011758-87521affb077+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= @@ -381,10 +498,12 @@ github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6Uezg github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libkv v0.2.2-0.20211217103745-e480589147e3 h1:q6MhOaE4xsrl6cAiFYrazobNFSQN6ckhD6Et9zYbcrU= github.com/docker/libkv v0.2.2-0.20211217103745-e480589147e3/go.mod h1:r5hEwHwW8dr0TFBYGCarMNbrQOiwL1xoqDYZ/JqoTK0= +github.com/docker/libnetwork v0.8.0-dev.2.0.20200917202933-d0951081b35f/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 h1:k8TfKGeAcDQFFQOGCQMRN04N4a9YrPlRMMKnzAuvM9Q= github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= @@ -395,6 +514,10 @@ github.com/dperny/gocsi 
v1.2.3-pre/go.mod h1:qQw5mIunz1RqMUfZcGJ9/Lt9EDaL0N3wPNY github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -407,11 +530,13 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fanliao/go-promise v0.0.0-20141029170127-1890db352a72/go.mod h1:PjfxuH4FZdUyfMdtBio2lsRr1AKEaVPwelzuHuh8Lqc= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW4o= github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -419,8 +544,11 @@ github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e h1:P10tZmVD2XclAa github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e/go.mod h1:2H9hjfbpSMHwY503FclkV/lZTBh2YlOmLLSda12uL8c= github.com/fluent/fluent-logger-golang v1.9.0 h1:zUdY44CHX2oIUc7VTNZc+4m+ORuO/mldQDA7czhWXEg= github.com/fluent/fluent-logger-golang v1.9.0/go.mod h1:2/HCT/jTy78yGyeNGQLGQsjF3zzzAuy6Xlk6FCMV5eU= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.11.3 h1:8sXhOn0uLys67V8EsXLc6eszDs8VXWxL3iRvebPhedY= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -434,6 +562,9 @@ github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JY github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= 
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-critic/go-critic v0.4.1/go.mod h1:7/14rZGnZbY6E38VEGk2kVhoq6itzc1E68facVDK23g= +github.com/go-critic/go-critic v0.4.3/go.mod h1:j4O3D4RoIwRqlZw5jJpx0BNfXWWbpcJoKu5cYSe4YmQ= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -441,6 +572,7 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= @@ -454,6 +586,7 @@ github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= 
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -468,9 +601,27 @@ github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dp github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp 
v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= @@ -478,19 +629,25 @@ github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5x github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6 h1:mkgN1ofwASrYnJ5W6U/BxG15eXXXjirgZc7CLqkcaro= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/googleapis v1.2.0/go.mod 
h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0= +github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2 h1:xisWqjiKEff2B0KfFYGpCqc3M3zdTz+OHQHRc09FeYk= github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2/go.mod h1:xEhNfoBDX1hzLm2Nf80qUvZ2sVwoMZ8d6IE2SrsQfh4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -501,6 +658,7 @@ github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -510,6 +668,7 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -528,13 +687,35 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= 
+github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gocyclo v0.0.0-20180528144436-0a533e8fa43d/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.23.7/go.mod h1:g/38bxfhp4rI7zeWSxcdIeHTQGS58TCak8FYcyCmavQ= +github.com/golangci/golangci-lint v1.27.0/go.mod h1:+eZALfxIuthdrHPtfM7w/R3POJLjHDfJJw8XZl9xOng= +github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= +github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/revgrep v0.0.0-20180812185044-276a5c0a1039/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/google/btree 
v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/certificate-transparency-go v1.0.20 h1:azETE79toaBOyp+StoEBy8atzQujL0PyBPEmsEeDCXI= github.com/google/certificate-transparency-go v1.0.20/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= +github.com/google/crfs v0.0.0-20191108021818-71d77da419c9/go.mod h1:etGhoOqfwPkooV6aqoX3eBGQOJblqdoc9XvWOeuxpPw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -549,11 +730,21 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-containerregistry v0.0.0-20191010200024-a3d713f9b7f8/go.mod h1:KyKXa9ciM8+lgMXwOVsXi7UxGrsf9mM61Mzs+xKUrKE= +github.com/google/go-containerregistry v0.1.2/go.mod h1:GPivBPgdAyd2SU+vf6EpsgOtWDuPqjW0hJZt4rNdTZ4= github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= +github.com/google/go-querystring v1.0.0/go.mod 
h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= +github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= +github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= @@ -573,22 +764,37 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod 
h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= +github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU= +github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gookit/color v1.2.4/go.mod h1:AhIE+pS6D4Ql0SQWbBeXPHw7gY0/sjHoA4s/n1KB7xg= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= 
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/goreleaser/goreleaser v0.136.0/go.mod h1:wiKrPUeSNh6Wu8nUHxZydSOVQ/OZvOaO7DTtFqie904= +github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w= +github.com/goreleaser/nfpm v1.3.0/go.mod h1:w0p7Kc9TAUgWMyrub63ex3M2Mgw88M4GZXoTq5UCb40= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= @@ -596,17 +802,28 @@ github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB7 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware 
v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= +github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok= +github.com/hanwen/go-fuse/v2 v2.0.3/go.mod h1:0EQM6aH2ctVpvZ6a+onrQ/vaykxh2GH7hy3e13vzTUY= github.com/hanwen/go-fuse/v2 v2.1.1-0.20220112183258-f57e95bda82d/go.mod h1:B1nGE/6RBFyBRC1RRnf23UpwCdyJ31eukw34oAKukAc= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/sdk v0.1.1/go.mod 
h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= @@ -629,6 +846,8 @@ github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1: github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -638,9 +857,11 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= @@ -652,12 +873,14 @@ github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOn github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.8.5 h1:ZynDUIQiA8usmRgPdGPHFdPnb1wgGI9tK3mO9hcAJjc= github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= +github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c/go.mod h1:fHzc09UnyJyqyW+bFuq864eh+wC7dj65aXmXLRe5to0= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= @@ -666,25 +889,39 @@ github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NH github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/insomniacslk/dhcp v0.0.0-20220119180841-3c283ff8b7dd/go.mod h1:h+MxyHxRg9NH3terB1nfRIUaQEcI0XOVkdR9LNBlp8E= github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ= +github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod 
h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062 h1:G1+wBT0dwjIrBdLy0MIG0i+E4CQxEnedHXdauJEIH6g= github.com/ishidawataru/sctp v0.0.0-20210707070123-9a39160e9062/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw= +github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea/go.mod h1:QMdK4dGB3YhEW2BmA1wgGpPYI3HZy/5gD705PXKUVSg= +github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= +github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= +github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s= +github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/joho/godotenv 
v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok= github.com/jsimonetti/rtnetlink v0.0.0-20201110080708-d2c240429e6c/go.mod h1:huN4d1phzjhlOsNIjFsw2SVRbwIHj3fJDMEU2SDPTmg= +github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= @@ -697,10 +934,14 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW github.com/kisielk/errcheck v1.2.0/go.mod 
h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A= github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -710,12 +951,18 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -723,14 +970,31 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/matoous/godox v0.0.0-20190911065817-5d6d842e92eb/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod 
h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions 
v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -742,6 +1006,7 @@ github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcK github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o= github.com/mdlayher/raw v0.0.0-20190606142536-fef19f00fc18/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.27 h1:aEH/kqUzUxGJ/UHcEKdJY+ugH6WEzsEBBSPa8zuy1aM= @@ -752,15 +1017,19 @@ github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= 
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ= github.com/moby/buildkit v0.10.3 h1:/dGykD8FW+H4p++q5+KqKEo6gAkYKyBQHdawdjVwVAU= github.com/moby/buildkit v0.10.3/go.mod h1:jxeOuly98l9gWHai0Ojrbnczrk/rf+o9/JqNhY+UCSo= github.com/moby/ipvs v1.0.2 h1:NSbzuRTvfneftLU3VwPU5QuA6NZ0IUmqq9+VHcQxqHw= @@ -770,8 +1039,12 @@ github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQ github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/swarmkit/v2 v2.0.0-20220420172245-6068d1894d46 h1:FVr9eatIpN7PlE2ZHP850rIJ6AQoZxoZvPSDR+WQY38= github.com/moby/swarmkit/v2 v2.0.0-20220420172245-6068d1894d46/go.mod h1:/so6Lct4y1x14UprW/loFsOe6xoXVTlvh25V36ULXNQ= +github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74= +github.com/moby/sys/mount v0.1.1/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74= github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs= github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0= +github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= +github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= github.com/moby/sys/mountinfo v0.4.0/go.mod 
h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= @@ -785,23 +1058,30 @@ github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGq github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc= github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20200915141129-7f0af18e79f2/go.mod h1:TjQg8pa4iejrUrjiz0MCtMV38jdMNW4doKSiBrEvCQQ= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/moby/vpnkit v0.5.0/go.mod h1:KyjUrL9cb6ZSNNAUwZfqRjhwwgJ3BJN+kXh0t43WTUQ= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= 
+github.com/mozilla/tls-observatory v0.0.0-20190404164649-a3c1b6cfecfd/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mozilla/tls-observatory v0.0.0-20200317151703-4fa42e1c2dee/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c= +github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= @@ -813,6 +1093,7 @@ github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.4.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -828,15 +1109,18 @@ github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod 
h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= @@ -850,8 +1134,10 @@ github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1 h1:9iF github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1/go.mod h1:K/JAU0m27RFhDRX4PcFdIKntROP6y5Ed6O91aZYDQfs= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc= @@ -861,6 +1147,7 @@ github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.m github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200728170252-4d89ac9fbff6/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= 
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -871,30 +1158,43 @@ github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xA github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opencontainers/selinux v1.10.1 h1:09LIPVRP3uuZGQvgR+SgMSNBd1Eb3vlRbGqQpoHsF8w= github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= +github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM= 
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee h1:P6U24L02WMfj9ymZTxl7CxS73JC99x3ukk+DBkgQGQs= github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee/go.mod h1:3uODdxMgOaPYeWU7RzZLxVtJHZ/x1f/iHkBZuKJDzuY= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= @@ -904,12 +1204,15 @@ github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVD github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 
+github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= @@ -919,7 +1222,9 @@ github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+ github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -932,19 +1237,31 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/quasilyte/go-ruleguard 
v0.1.2-0.20200318202121-b00d7a75d3d8/go.mod h1:CGFX09Ci3pq9QZdj86B+VGIdNj4VyCo2iPOGS9esB/k= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rootless-containers/rootlesskit v1.0.0 h1:+DI5RQEZa4OOnkOixkrezFye0XLlSsdrtGSP6+g1254= github.com/rootless-containers/rootlesskit v1.0.0/go.mod h1:8Lo4zb73rSW3seB+a7UuO1gAoRD1pVkKMbXEY3NFNTE= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.0.4/go.mod h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0fVb9d5fSEaLhoE= +github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod 
h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= @@ -952,6 +1269,15 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= +github.com/securego/gosec v0.0.0-20200103095621-79fbf3af8d83/go.mod h1:vvbZ2Ae7AzSq3/kywjUDxSNq2SJ27RxCz2un0H3ePqE= +github.com/securego/gosec v0.0.0-20200401082031-e946c8c39989/go.mod h1:i9l/TNj+yDFh9SZXUTvspXTjbFXgZGP/UvhU1S65A4A= +github.com/securego/gosec/v2 v2.3.0/go.mod h1:UzeVyUXbxukhLeHKV3VVqo7HdoQR9MrRfFmZYotn8ME= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc= +github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= 
+github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -963,22 +1289,30 @@ github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E= +github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= +github.com/sourcegraph/go-diff v0.5.3/go.mod h1:v9JDtjCE4HHHCZGId75rg8gkKKa98RVjBcBGsVmMmak= github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -986,7 +1320,9 @@ github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= 
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.6.1/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= @@ -995,6 +1331,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -1008,30 +1345,59 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tdakkota/asciicheck 
v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tetafro/godot v0.3.7/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= +github.com/tetafro/godot v0.4.2/go.mod h1:/7NLHhv08H1+8DNj0MElpAACw1ajsCuf3TKNQxA5S+0= github.com/thecodeteam/gosync v0.1.0/go.mod h1:43QHsngcnWc8GE1aCmi7PEypslflHjCzXFleuWKEb00= +github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/tinylib/msgp v1.1.0 h1:9fQd+ICuRIu/ue4vxJZu6/LzxN0HwMds2nq/0cFvxHU= github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= +github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig= +github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85/go.mod 
h1:a7cilN64dG941IOXfhJhlH0qB92hxJ9A1ewrdUmJ6xo= github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274 h1:wbyZxD6IPFp0sl5uscMOJRsz5UKGFiNiD16e+MVfKZY= github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274/go.mod h1:oPAfvw32vlUJSjyDcQ3Bu0nb2ON2B+G0dtVN/SZNJiA= +github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 h1:8eY6m1mjgyB8XySUR7WvebTM8D/Vs86jLJzD/Tw7zkc= +github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7/go.mod h1:qqvyZqkfwkoJuPU/bw61bItaoO0SJ8YSW0vSVRRvsRg= github.com/tonistiigi/go-archvariant v1.0.0 h1:5LC1eDWiBNflnTF1prCiX09yfNHIxDC/aukdhCdTyb0= github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/u-root/uio v0.0.0-20210528114334-82958018845c/go.mod h1:LpEX5FO/cB+WF4TYGY1V5qktpaZLkKkSegbr0V4eYXA= +github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= +github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= +github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli 
v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.4.0/go.mod h1:NX9W0zmTvedE5oDoOMs2RTC8RvdK98NTYZE5LbaEYPg= +github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/quicktemplate v1.2.0/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= +github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= @@ -1043,12 +1409,16 @@ github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA= 
github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= +github.com/xanzy/go-gitlab v0.32.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1064,6 +1434,7 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd 
v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= @@ -1084,6 +1455,10 @@ go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVd go.etcd.io/etcd/server/v3 v3.5.2 h1:B6ytJvS4Fmt8nkjzS2/8POf4tuPhFMluE0lWd4dx/7U= go.etcd.io/etcd/server/v3 v3.5.2/go.mod h1:mlG8znIEz4N/28GABrohZCBM11FqgGVQcpbcyJgh0j0= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= +go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= +go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= +go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1149,27 +1524,40 @@ go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9i go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI= +golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto 
v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38= golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= @@ -1180,7 +1568,9 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod 
h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -1203,15 +1593,21 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1230,6 +1626,7 @@ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1265,8 +1662,13 @@ golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net 
v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f h1:hEYJvxw1lSnWIl8X9ofsYMklzaDs90JI2az5YMd4fPM= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1282,6 +1684,7 @@ golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw= golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1294,14 +1697,21 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1316,6 +1726,7 @@ golang.org/x/sys v0.0.0-20190606122018-79a91cf218c4/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1331,6 +1742,7 @@ golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1338,6 +1750,7 @@ golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1363,10 +1776,12 @@ golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200917073148-efd3b9a0ff20/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201013081832-0aaa2718063a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1414,6 +1829,7 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1435,17 +1851,28 @@ golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M= golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190221204921-83362c3779f5/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools 
v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1453,13 +1880,17 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools 
v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113232020-e2727e816f5a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1467,18 +1898,26 @@ golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200102140908-9497f49d5709/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204192400-7124308813f3/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools 
v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200331202046-9d5940d49312/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200502202811-ed308ab3e770/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -1507,8 +1946,19 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= +google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= +google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1521,6 +1971,7 @@ google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod 
h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= @@ -1537,6 +1988,8 @@ google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00 google.golang.org/api v0.54.0 h1:ECJUVngj71QI6XEm7b1sAf8BljU5inEhMbKPR8Lxhhk= google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1546,11 +1999,17 @@ google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6 google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= 
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1607,6 +2066,9 @@ google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEc google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= 
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -1667,9 +2129,11 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.56.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= @@ -1677,12 +2141,14 @@ gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1696,6 +2162,9 @@ gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= gotest.tools/v3 v3.2.0 h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I= gotest.tools/v3 v3.2.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= +grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1703,27 +2172,41 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools 
v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.0.0-20180904230853-4e7be11eab3f/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA= +k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw= k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs= k8s.io/api v0.23.4/go.mod h1:i77F4JfyNNrhOjZF7OwwNJS5Y1S9dpwvb9iYRYRczfI= +k8s.io/apimachinery v0.0.0-20180904193909-def12e63c512/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g= +k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U= k8s.io/apimachinery v0.23.4/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ= +k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/client-go 
v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc= +k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y= k8s.io/client-go v0.23.4/go.mod h1:PKnIL4pqLuvYUK1WU7RLTMYKPiIh7MYShLshtRY9cj0= +k8s.io/cloud-provider v0.17.4/go.mod h1:XEjKDzfD+b9MTLXQFlDGkk6Ho8SGMpaU8Uugx/KNK9U= +k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s= k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= @@ -1734,26 +2217,48 @@ k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4= k8s.io/cri-api v0.24.0-alpha.3/go.mod h1:c/NLI5Zdyup5+oEYqFO2IE32ptofNiZpS1nL2y51gAg= +k8s.io/csi-translation-lib v0.17.4/go.mod h1:CsxmjwxEI0tTNMzffIAcgR9lX4wOh6AKHdxQrT7L0oo= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= 
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= +k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kubernetes v1.11.10/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js= +k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils 
v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= +modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= +modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZIVpwbkw+04kSxk3rAtzlimaUJw= +mvdan.cc/unparam v0.0.0-20200501210554-b37ab49443f7/go.mod h1:HGC5lll35J70Y5v7vCGb9oLhHoScFwkHDJm/05RdSTc= +pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= @@ -1761,6 +2266,8 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyz sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= 
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= +sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= +sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= @@ -1768,3 +2275,5 @@ sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZa sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= +sourcegraph.com/sqs/pbtypes v1.0.0/go.mod h1:3AciMUv4qUuRHRHhOG4TZOB+72GdPVz5k+c648qsFS4= diff --git a/vendor/github.com/dimchansky/utfbom/.gitignore b/vendor/github.com/dimchansky/utfbom/.gitignore new file mode 100644 index 0000000000000..d7ec5cebb98d7 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/.gitignore @@ -0,0 +1,37 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +*.o +*.a + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.prof + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# 
Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ + +# Gogland +.idea/ \ No newline at end of file diff --git a/vendor/github.com/dimchansky/utfbom/.travis.yml b/vendor/github.com/dimchansky/utfbom/.travis.yml new file mode 100644 index 0000000000000..19312ee35fc06 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/.travis.yml @@ -0,0 +1,29 @@ +language: go +sudo: false + +go: + - 1.10.x + - 1.11.x + - 1.12.x + - 1.13.x + - 1.14.x + - 1.15.x + +cache: + directories: + - $HOME/.cache/go-build + - $HOME/gopath/pkg/mod + +env: + global: + - GO111MODULE=on + +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover + - go get golang.org/x/tools/cmd/goimports + - go get golang.org/x/lint/golint +script: + - gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. Following files has problem:\n $unformatted" && false) + - golint ./... # This won't break the build, just show warnings + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/dimchansky/utfbom/LICENSE b/vendor/github.com/dimchansky/utfbom/LICENSE new file mode 100644 index 0000000000000..6279cb87f4348 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright (c) 2018-2020, Dmitrij Koniajev (dimchansky@gmail.com) + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/dimchansky/utfbom/README.md b/vendor/github.com/dimchansky/utfbom/README.md new file mode 100644 index 0000000000000..8ece280089a5d --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/README.md @@ -0,0 +1,66 @@ +# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master) + +The package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. It can also return the encoding detected by the BOM. 
+ +## Installation + + go get -u github.com/dimchansky/utfbom + +## Example + +```go +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + + "github.com/dimchansky/utfbom" +) + +func main() { + trySkip([]byte("\xEF\xBB\xBFhello")) + trySkip([]byte("hello")) +} + +func trySkip(byteData []byte) { + fmt.Println("Input:", byteData) + + // just skip BOM + output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData))) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM skipping", output) + + // skip BOM and detect encoding + sr, enc := utfbom.Skip(bytes.NewReader(byteData)) + fmt.Printf("Detected encoding: %s\n", enc) + output, err = ioutil.ReadAll(sr) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM detection and skipping", output) + fmt.Println() +} +``` + +Output: + +``` +$ go run main.go +Input: [239 187 191 104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: UTF8 +ReadAll with BOM detection and skipping [104 101 108 108 111] + +Input: [104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: Unknown +ReadAll with BOM detection and skipping [104 101 108 108 111] +``` + + diff --git a/vendor/github.com/dimchansky/utfbom/utfbom.go b/vendor/github.com/dimchansky/utfbom/utfbom.go new file mode 100644 index 0000000000000..77a303e564b16 --- /dev/null +++ b/vendor/github.com/dimchansky/utfbom/utfbom.go @@ -0,0 +1,192 @@ +// Package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. +// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader +// interface but provides automatic BOM checking and removing as necessary. +package utfbom + +import ( + "errors" + "io" +) + +// Encoding is type alias for detected UTF encoding. +type Encoding int + +// Constants to identify detected UTF encodings. 
+const ( + // Unknown encoding, returned when no BOM was detected + Unknown Encoding = iota + + // UTF8, BOM bytes: EF BB BF + UTF8 + + // UTF-16, big-endian, BOM bytes: FE FF + UTF16BigEndian + + // UTF-16, little-endian, BOM bytes: FF FE + UTF16LittleEndian + + // UTF-32, big-endian, BOM bytes: 00 00 FE FF + UTF32BigEndian + + // UTF-32, little-endian, BOM bytes: FF FE 00 00 + UTF32LittleEndian +) + +// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface. +func (e Encoding) String() string { + switch e { + case UTF8: + return "UTF8" + case UTF16BigEndian: + return "UTF16BigEndian" + case UTF16LittleEndian: + return "UTF16LittleEndian" + case UTF32BigEndian: + return "UTF32BigEndian" + case UTF32LittleEndian: + return "UTF32LittleEndian" + default: + return "Unknown" + } +} + +const maxConsecutiveEmptyReads = 100 + +// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +// It also returns the encoding detected by the BOM. +// If the detected encoding is not needed, you can call the SkipOnly function. +func Skip(rd io.Reader) (*Reader, Encoding) { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok { + return b, Unknown + } + + enc, left, err := detectUtf(rd) + return &Reader{ + rd: rd, + buf: left, + err: err, + }, enc +} + +// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +func SkipOnly(rd io.Reader) *Reader { + r, _ := Skip(rd) + return r +} + +// Reader implements automatic BOM (Unicode Byte Order Mark) checking and +// removing as necessary for an io.Reader object. +type Reader struct { + rd io.Reader // reader provided by the client + buf []byte // buffered data + err error // last error +} + +// Read is an implementation of io.Reader interface. +// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary. 
+func (r *Reader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + if r.buf == nil { + if r.err != nil { + return 0, r.readErr() + } + + return r.rd.Read(p) + } + + // copy as much as we can + n = copy(p, r.buf) + r.buf = nilIfEmpty(r.buf[n:]) + return n, nil +} + +func (r *Reader) readErr() error { + err := r.err + r.err = nil + return err +} + +var errNegativeRead = errors.New("utfbom: reader returned negative count from Read") + +func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) { + buf, err = readBOM(rd) + + if len(buf) >= 4 { + if isUTF32BigEndianBOM4(buf) { + return UTF32BigEndian, nilIfEmpty(buf[4:]), err + } + if isUTF32LittleEndianBOM4(buf) { + return UTF32LittleEndian, nilIfEmpty(buf[4:]), err + } + } + + if len(buf) > 2 && isUTF8BOM3(buf) { + return UTF8, nilIfEmpty(buf[3:]), err + } + + if (err != nil && err != io.EOF) || (len(buf) < 2) { + return Unknown, nilIfEmpty(buf), err + } + + if isUTF16BigEndianBOM2(buf) { + return UTF16BigEndian, nilIfEmpty(buf[2:]), err + } + if isUTF16LittleEndianBOM2(buf) { + return UTF16LittleEndian, nilIfEmpty(buf[2:]), err + } + + return Unknown, nilIfEmpty(buf), err +} + +func readBOM(rd io.Reader) (buf []byte, err error) { + const maxBOMSize = 4 + var bom [maxBOMSize]byte // used to read BOM + + // read as many bytes as possible + for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] { + if n, err = rd.Read(bom[len(buf):]); n < 0 { + panic(errNegativeRead) + } + if n > 0 { + nEmpty = 0 + } else { + nEmpty++ + if nEmpty >= maxConsecutiveEmptyReads { + err = io.ErrNoProgress + } + } + } + return +} + +func isUTF32BigEndianBOM4(buf []byte) bool { + return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF +} + +func isUTF32LittleEndianBOM4(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00 +} + +func isUTF8BOM3(buf []byte) bool { + return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 
0xBF +} + +func isUTF16BigEndianBOM2(buf []byte) bool { + return buf[0] == 0xFE && buf[1] == 0xFF +} + +func isUTF16LittleEndianBOM2(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE +} + +func nilIfEmpty(buf []byte) (res []byte) { + if len(buf) > 0 { + res = buf + } + return +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/.gitignore b/vendor/github.com/golang-jwt/jwt/v4/.gitignore new file mode 100644 index 0000000000000..09573e0169c21 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin +.idea/ + diff --git a/vendor/github.com/golang-jwt/jwt/v4/LICENSE b/vendor/github.com/golang-jwt/jwt/v4/LICENSE new file mode 100644 index 0000000000000..35dbc252041ea --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) 2012 Dave Grijalva +Copyright (c) 2021 golang-jwt maintainers + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ diff --git a/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md new file mode 100644 index 0000000000000..32966f59818e4 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/MIGRATION_GUIDE.md @@ -0,0 +1,22 @@ +## Migration Guide (v4.0.0) + +Starting from [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0), the import path will be: + + "github.com/golang-jwt/jwt/v4" + +The `/v4` version will be backwards compatible with existing `v3.x.y` tags in this repo, as well as +`github.com/dgrijalva/jwt-go`. For most users this should be a drop-in replacement, if you're having +troubles migrating, please open an issue. + +You can replace all occurrences of `github.com/dgrijalva/jwt-go` or `github.com/golang-jwt/jwt` with `github.com/golang-jwt/jwt/v4`, either manually or by using tools such as `sed` or `gofmt`. + +And then you'd typically run: + +``` +go get github.com/golang-jwt/jwt/v4 +go mod tidy +``` + +## Older releases (before v3.2.0) + +The original migration guide for older releases can be found at https://github.com/dgrijalva/jwt-go/blob/master/MIGRATION_GUIDE.md. diff --git a/vendor/github.com/golang-jwt/jwt/v4/README.md b/vendor/github.com/golang-jwt/jwt/v4/README.md new file mode 100644 index 0000000000000..3072d24a9da6b --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/README.md @@ -0,0 +1,114 @@ +# jwt-go + +[![build](https://github.com/golang-jwt/jwt/actions/workflows/build.yml/badge.svg)](https://github.com/golang-jwt/jwt/actions/workflows/build.yml) +[![Go Reference](https://pkg.go.dev/badge/github.com/golang-jwt/jwt/v4.svg)](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](https://datatracker.ietf.org/doc/html/rfc7519). 
+ +Starting with [v4.0.0](https://github.com/golang-jwt/jwt/releases/tag/v4.0.0) this project adds Go module support, but maintains backwards compatibility with older `v3.x.y` tags and upstream `github.com/dgrijalva/jwt-go`. +See the [`MIGRATION_GUIDE.md`](./MIGRATION_GUIDE.md) for more information. + +> After the original author of the library suggested migrating the maintenance of `jwt-go`, a dedicated team of open source maintainers decided to clone the existing library into this repository. See [dgrijalva/jwt-go#462](https://github.com/dgrijalva/jwt-go/issues/462) for a detailed discussion on this topic. + + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic. Recommendation is to upgrade to at least 1.15 See issue [dgrijalva/jwt-go#216](https://github.com/dgrijalva/jwt-go/issues/216) for more detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. + +### Supported Go versions + +Our support of Go versions is aligned with Go's [version release policy](https://golang.org/doc/devel/release#policy). +So we will support a major version of Go until there are two newer major releases. +We no longer support building jwt-go with unsupported Go versions, as these contain security vulnerabilities +which will not be fixed. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. 
The first two parts are JSON objects, that have been [base64url](https://datatracker.ietf.org/doc/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. + +## Examples + +See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage: + +* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac) +* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac) +* [Directory of Examples](https://pkg.go.dev/github.com/golang-jwt/jwt#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. 
+ +Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RFC 7519](https://datatracker.ietf.org/doc/html/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](https://datatracker.ietf.org/doc/html/rfc7519#section-6), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `main`. Periodically, versions will be tagged from `main`. You can find all the releases on [the project releases page](https://github.com/golang-jwt/jwt/releases). + +**BREAKING CHANGES:** +A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data: + +* The author of the token was in the possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+ +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details.
Here are the most common ones: + +* The [HMAC signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation +* The [EdDSA signing method](https://pkg.go.dev/github.com/golang-jwt/jwt#SigningMethodEd25519) (`Ed25519`) expect `ed25519.PrivateKey` for signing and `ed25519.PublicKey` for validation + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. 
+ +### Troubleshooting + +This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types. + +## More + +Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md new file mode 100644 index 0000000000000..afbfc4e408d1c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/VERSION_HISTORY.md @@ -0,0 +1,135 @@ +## `jwt-go` Version History + +#### 4.0.0 + +* Introduces support for Go modules. The `v4` version will be backwards compatible with `v3.x.y`. + +#### 3.2.2 + +* Starting from this release, we are adopting the policy to support the most 2 recent versions of Go currently available. By the time of this release, this is Go 1.15 and 1.16 ([#28](https://github.com/golang-jwt/jwt/pull/28)). +* Fixed a potential issue that could occur when the verification of `exp`, `iat` or `nbf` was not required and contained invalid contents, i.e. non-numeric/date. Thanks for @thaJeztah for making us aware of that and @giorgos-f3 for originally reporting it to the formtech fork ([#40](https://github.com/golang-jwt/jwt/pull/40)). +* Added support for EdDSA / ED25519 ([#36](https://github.com/golang-jwt/jwt/pull/36)). +* Optimized allocations ([#33](https://github.com/golang-jwt/jwt/pull/33)). 
+ +#### 3.2.1 + +* **Import Path Change**: See MIGRATION_GUIDE.md for tips on updating your code + * Changed the import path from `github.com/dgrijalva/jwt-go` to `github.com/golang-jwt/jwt` +* Fixed type confusing issue between `string` and `[]string` in `VerifyAudience` ([#12](https://github.com/golang-jwt/jwt/pull/12)). This fixes CVE-2020-26160 + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. 
+ * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. 
+ * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+ +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. 
No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods diff --git a/vendor/github.com/golang-jwt/jwt/v4/claims.go b/vendor/github.com/golang-jwt/jwt/v4/claims.go new file mode 100644 index 0000000000000..b07ac02de0941 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/claims.go @@ -0,0 +1,267 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// Claims must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// RegisteredClaims are a structured version of the JWT Claims Set, +// restricted to Registered Claim Names, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4.1 +// +// This type can be used on its own, but then additional private and +// public claims embedded in the JWT will not be parsed. The typical usecase +// therefore is to embedded this in a user-defined claim type. +// +// See examples for how to use this with your own claim types. +type RegisteredClaims struct { + // the `iss` (Issuer) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.1 + Issuer string `json:"iss,omitempty"` + + // the `sub` (Subject) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.2 + Subject string `json:"sub,omitempty"` + + // the `aud` (Audience) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.3 + Audience ClaimStrings `json:"aud,omitempty"` + + // the `exp` (Expiration Time) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.4 + ExpiresAt *NumericDate `json:"exp,omitempty"` + + // the `nbf` (Not Before) claim. 
See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.5 + NotBefore *NumericDate `json:"nbf,omitempty"` + + // the `iat` (Issued At) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.6 + IssuedAt *NumericDate `json:"iat,omitempty"` + + // the `jti` (JWT ID) claim. See https://datatracker.ietf.org/doc/html/rfc7519#section-4.1.7 + ID string `json:"jti,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c RegisteredClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := now.Sub(c.ExpiresAt.Time) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = fmt.Errorf("token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *RegisteredClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). +// If req is false, it will return true, if exp is unset. 
+func (c *RegisteredClaims) VerifyExpiresAt(cmp time.Time, req bool) bool { + if c.ExpiresAt == nil { + return verifyExp(nil, cmp, req) + } + + return verifyExp(&c.ExpiresAt.Time, cmp, req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *RegisteredClaims) VerifyIssuedAt(cmp time.Time, req bool) bool { + if c.IssuedAt == nil { + return verifyIat(nil, cmp, req) + } + + return verifyIat(&c.IssuedAt.Time, cmp, req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (c *RegisteredClaims) VerifyNotBefore(cmp time.Time, req bool) bool { + if c.NotBefore == nil { + return verifyNbf(nil, cmp, req) + } + + return verifyNbf(&c.NotBefore.Time, cmp, req) +} + +// StandardClaims are a structured version of the JWT Claims Set, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-4. They do not follow the +// specification exactly, since they were based on an earlier draft of the +// specification and not updated. The main difference is that they only +// support integer-based date fields and singular audiences. This might lead to +// incompatibilities with other JWT implementations. The use of this is discouraged, instead +// the newer RegisteredClaims struct should be used. +// +// Deprecated: Use RegisteredClaims instead for a forward-compatible way to access registered claims in a struct. +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Valid validates time based claims "exp, iat, nbf". There is no accounting for clock skew. 
+// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if !c.VerifyExpiresAt(now, false) { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if !c.VerifyIssuedAt(now, false) { + vErr.Inner = fmt.Errorf("token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !c.VerifyNotBefore(now, false) { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// VerifyAudience compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud([]string{c.Audience}, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). +// If req is false, it will return true, if exp is unset. +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + if c.ExpiresAt == 0 { + return verifyExp(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.ExpiresAt, 0) + return verifyExp(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuedAt compares the iat claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + if c.IssuedAt == 0 { + return verifyIat(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.IssuedAt, 0) + return verifyIat(&t, time.Unix(cmp, 0), req) +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). 
+// If req is false, it will return true, if nbf is unset. +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + if c.NotBefore == 0 { + return verifyNbf(nil, time.Unix(cmp, 0), req) + } + + t := time.Unix(c.NotBefore, 0) + return verifyNbf(&t, time.Unix(cmp, 0), req) +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// ----- helpers + +func verifyAud(aud []string, cmp string, required bool) bool { + if len(aud) == 0 { + return !required + } + // use a var here to keep constant time compare when looping over a number of claims + result := false + + var stringClaims string + for _, a := range aud { + if subtle.ConstantTimeCompare([]byte(a), []byte(cmp)) != 0 { + result = true + } + stringClaims = stringClaims + a + } + + // case where "" is sent in one or many aud claims + if len(stringClaims) == 0 { + return !required + } + + return result +} + +func verifyExp(exp *time.Time, now time.Time, required bool) bool { + if exp == nil { + return !required + } + return now.Before(*exp) +} + +func verifyIat(iat *time.Time, now time.Time, required bool) bool { + if iat == nil { + return !required + } + return now.After(*iat) || now.Equal(*iat) +} + +func verifyNbf(nbf *time.Time, now time.Time, required bool) bool { + if nbf == nil { + return !required + } + return now.After(*nbf) || now.Equal(*nbf) +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/doc.go b/vendor/github.com/golang-jwt/jwt/v4/doc.go new file mode 100644 index 0000000000000..a86dc1a3b348c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/doc.go @@ -0,0 +1,4 @@ +// 
Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go new file mode 100644 index 0000000000000..eac023fc6c84e --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa.go @@ -0,0 +1,142 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// SigningMethodECDSA implements the ECDSA family of signing methods. +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod. 
+// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus { + return nil + } + + return ErrECDSAVerification +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outputs (r and s) into big-endian byte arrays + // padded with zeros on the left to make sure the sizes work out. + // Output must be 2*keyBytes long. 
+ out := make([]byte, 2*keyBytes) + r.FillBytes(out[0:keyBytes]) // r is assigned to the first half of output. + s.FillBytes(out[keyBytes:]) // s is assigned to the second half of output. + + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go new file mode 100644 index 0000000000000..5700636d35b6c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ecdsa_utils.go @@ -0,0 +1,69 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("key is not a valid ECDSA private key") +) + +// ParseECPrivateKeyFromPEM parses a PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// ParseECPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, 
err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go new file mode 100644 index 0000000000000..07d3aacd631fa --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519.go @@ -0,0 +1,85 @@ +package jwt + +import ( + "errors" + + "crypto" + "crypto/ed25519" + "crypto/rand" +) + +var ( + ErrEd25519Verification = errors.New("ed25519: verification error") +) + +// SigningMethodEd25519 implements the EdDSA family. +// Expects ed25519.PrivateKey for signing and ed25519.PublicKey for verification +type SigningMethodEd25519 struct{} + +// Specific instance for EdDSA +var ( + SigningMethodEdDSA *SigningMethodEd25519 +) + +func init() { + SigningMethodEdDSA = &SigningMethodEd25519{} + RegisterSigningMethod(SigningMethodEdDSA.Alg(), func() SigningMethod { + return SigningMethodEdDSA + }) +} + +func (m *SigningMethodEd25519) Alg() string { + return "EdDSA" +} + +// Verify implements token verification for the SigningMethod. +// For this verify method, key must be an ed25519.PublicKey +func (m *SigningMethodEd25519) Verify(signingString, signature string, key interface{}) error { + var err error + var ed25519Key ed25519.PublicKey + var ok bool + + if ed25519Key, ok = key.(ed25519.PublicKey); !ok { + return ErrInvalidKeyType + } + + if len(ed25519Key) != ed25519.PublicKeySize { + return ErrInvalidKey + } + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Verify the signature + if !ed25519.Verify(ed25519Key, []byte(signingString), sig) { + return ErrEd25519Verification + } + + return nil +} + +// Sign implements token signing for the SigningMethod. 
+// For this signing method, key must be an ed25519.PrivateKey +func (m *SigningMethodEd25519) Sign(signingString string, key interface{}) (string, error) { + var ed25519Key crypto.Signer + var ok bool + + if ed25519Key, ok = key.(crypto.Signer); !ok { + return "", ErrInvalidKeyType + } + + if _, ok := ed25519Key.Public().(ed25519.PublicKey); !ok { + return "", ErrInvalidKey + } + + // Sign the string and return the encoded result + // ed25519 performs a two-pass hash as part of its algorithm. Therefore, we need to pass a non-prehashed message into the Sign function, as indicated by crypto.Hash(0) + sig, err := ed25519Key.Sign(rand.Reader, []byte(signingString), crypto.Hash(0)) + if err != nil { + return "", err + } + return EncodeSegment(sig), nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go new file mode 100644 index 0000000000000..cdb5e68e8767a --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/ed25519_utils.go @@ -0,0 +1,64 @@ +package jwt + +import ( + "crypto" + "crypto/ed25519" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotEdPrivateKey = errors.New("key is not a valid Ed25519 private key") + ErrNotEdPublicKey = errors.New("key is not a valid Ed25519 public key") +) + +// ParseEdPrivateKeyFromPEM parses a PEM-encoded Edwards curve private key +func ParseEdPrivateKeyFromPEM(key []byte) (crypto.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PrivateKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PrivateKey); !ok { + return nil, ErrNotEdPrivateKey + } + + return pkey, nil +} + +// ParseEdPublicKeyFromPEM parses a PEM-encoded Edwards curve public key +func 
ParseEdPublicKeyFromPEM(key []byte) (crypto.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + return nil, err + } + + var pkey ed25519.PublicKey + var ok bool + if pkey, ok = parsedKey.(ed25519.PublicKey); !ok { + return nil, ErrNotEdPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/errors.go b/vendor/github.com/golang-jwt/jwt/v4/errors.go new file mode 100644 index 0000000000000..f309878b30d6e --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// NewValidationError is a helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// 
ValidationError represents an error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// Error is the implementation of the err interface. +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/hmac.go b/vendor/github.com/golang-jwt/jwt/v4/hmac.go new file mode 100644 index 0000000000000..011f68a274402 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// SigningMethodHMAC implements the HMAC-SHA family of signing methods. 
+// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. 
+ return nil +} + +// Sign implements token signing for the SigningMethod. +// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/map_claims.go b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go new file mode 100644 index 0000000000000..e7da633b93c60 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/map_claims.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "encoding/json" + "errors" + "time" + // "fmt" +) + +// MapClaims is a claims type that uses the map[string]interface{} for JSON decoding. +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// VerifyAudience Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + var aud []string + switch v := m["aud"].(type) { + case string: + aud = append(aud, v) + case []string: + aud = v + case []interface{}: + for _, a := range v { + vs, ok := a.(string) + if !ok { + return false + } + aud = append(aud, vs) + } + } + return verifyAud(aud, cmp, req) +} + +// VerifyExpiresAt compares the exp claim against cmp (cmp <= exp). +// If req is false, it will return true, if exp is unset. 
+func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["exp"] + if !ok { + return !req + } + + switch exp := v.(type) { + case float64: + if exp == 0 { + return verifyExp(nil, cmpTime, req) + } + + return verifyExp(&newNumericDateFromSeconds(exp).Time, cmpTime, req) + case json.Number: + v, _ := exp.Float64() + + return verifyExp(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyIssuedAt compares the exp claim against cmp (cmp >= iat). +// If req is false, it will return true, if iat is unset. +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["iat"] + if !ok { + return !req + } + + switch iat := v.(type) { + case float64: + if iat == 0 { + return verifyIat(nil, cmpTime, req) + } + + return verifyIat(&newNumericDateFromSeconds(iat).Time, cmpTime, req) + case json.Number: + v, _ := iat.Float64() + + return verifyIat(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyNotBefore compares the nbf claim against cmp (cmp >= nbf). +// If req is false, it will return true, if nbf is unset. +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + cmpTime := time.Unix(cmp, 0) + + v, ok := m["nbf"] + if !ok { + return !req + } + + switch nbf := v.(type) { + case float64: + if nbf == 0 { + return verifyNbf(nil, cmpTime, req) + } + + return verifyNbf(&newNumericDateFromSeconds(nbf).Time, cmpTime, req) + case json.Number: + v, _ := nbf.Float64() + + return verifyNbf(&newNumericDateFromSeconds(v).Time, cmpTime, req) + } + + return false +} + +// VerifyIssuer compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Valid validates time based claims "exp, iat, nbf". 
+// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if !m.VerifyExpiresAt(now, false) { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if !m.VerifyIssuedAt(now, false) { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if !m.VerifyNotBefore(now, false) { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/none.go b/vendor/github.com/golang-jwt/jwt/v4/none.go new file mode 100644 index 0000000000000..f19835d2078bb --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/none.go @@ -0,0 +1,52 @@ +package jwt + +// SigningMethodNone implements the none signing method. This is required by the spec +// but you probably should never use it. 
+var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. 
+ return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/parser.go b/vendor/github.com/golang-jwt/jwt/v4/parser.go new file mode 100644 index 0000000000000..0c811f311b636 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse parses, validates, and returns a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. 
short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// ParseUnverified parses the token but doesn't validate the signature. +// +// WARNING: Don't use this method unless you know what you're doing. +// +// It's only ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from it. 
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. 
Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa.go b/vendor/github.com/golang-jwt/jwt/v4/rsa.go new file mode 100644 index 0000000000000..b910b19c0b51f --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// SigningMethodRSA implements the RSA family of signing methods. 
+// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Verify implements token verification for the SigningMethod +// For this signing method, must be an *rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Sign implements token signing for the SigningMethod +// For this signing method, must be an *rsa.PrivateKey structure. 
+func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go new file mode 100644 index 0000000000000..5a8502feb34b7 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go @@ -0,0 +1,142 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// SigningMethodRSAPSS implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions + // VerifyOptions is optional. If set overrides Options for rsa.VerifyPPS. + // Used to accept tokens signed with rsa.PSSSaltLengthAuto, what doesn't follow + // https://tools.ietf.org/html/rfc7518#section-3.5 but was used previously. + // See https://github.com/dgrijalva/jwt-go/issues/285#issuecomment-437451244 for details. + VerifyOptions *rsa.PSSOptions +} + +// Specific instances for RS/PS and company. 
+var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + SigningMethodRSA: &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + Options: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthEqualsHash, + }, + VerifyOptions: &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Verify implements token verification for the SigningMethod. 
+// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + opts := m.Options + if m.VerifyOptions != nil { + opts = m.VerifyOptions + } + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, opts) +} + +// Sign implements token signing for the SigningMethod. +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go new file mode 100644 index 0000000000000..1966c450bf8b2 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/rsa_utils.go @@ -0,0 +1,105 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("invalid key: Key must be a PEM encoded PKCS1 or PKCS8 key") + ErrNotRSAPrivateKey = errors.New("key is not a valid RSA private key") + 
ErrNotRSAPublicKey = errors.New("key is not a valid RSA public key") +) + +// ParseRSAPrivateKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// ParseRSAPrivateKeyFromPEMWithPassword parses a PEM encoded PKCS1 or PKCS8 private key protected with password +// +// Deprecated: This function is deprecated and should not be used anymore. It uses the deprecated x509.DecryptPEMBlock +// function, which was deprecated since RFC 1423 is regarded insecure by design. Unfortunately, there is no alternative +// in the Go standard library for now. See https://github.com/golang/go/issues/8860. 
+func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// ParseRSAPublicKeyFromPEM parses a PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/signing_method.go b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go new file mode 100644 index 0000000000000..3269170f31f10 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// SigningMethod can be used add new methods for signing or verifying tokens. 
+type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// RegisterSigningMethod registers the "alg" name and a factory function for signing method. +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// GetSigningMethod retrieves a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf new file mode 100644 index 0000000000000..53745d51d7c74 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/staticcheck.conf @@ -0,0 +1 @@ +checks = ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1023"] diff --git a/vendor/github.com/golang-jwt/jwt/v4/token.go b/vendor/github.com/golang-jwt/jwt/v4/token.go new file mode 100644 index 0000000000000..b896acb0b4aa5 --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/token.go @@ -0,0 +1,110 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Keyfunc will be used by the Parse methods as a callback function to supply +// the key for verification. 
The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// Token represents a JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// New creates a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// SignedString retrieves the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// SigningString generates the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. 
+func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse parses, validates, and returns a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// EncodeSegment encodes a JWT specific base64url encoding with padding stripped +// +// Deprecated: In a future release, we will demote this function to a non-exported function, since it +// should only be used internally +func EncodeSegment(seg []byte) string { + return base64.RawURLEncoding.EncodeToString(seg) +} + +// DecodeSegment decodes a JWT specific base64url encoding with padding stripped +// +// Deprecated: In a future release, we will demote this function to a non-exported function, since it +// should only be used internally +func DecodeSegment(seg string) ([]byte, error) { + return base64.RawURLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/golang-jwt/jwt/v4/types.go b/vendor/github.com/golang-jwt/jwt/v4/types.go new file mode 100644 index 0000000000000..15c39a302183c --- /dev/null +++ b/vendor/github.com/golang-jwt/jwt/v4/types.go @@ -0,0 +1,125 @@ +package jwt + +import ( + "encoding/json" + "fmt" + "reflect" + "strconv" + "time" +) + +// TimePrecision sets the precision of times and dates within this library. 
+// This has an influence on the precision of times when comparing expiry or +// other related time fields. Furthermore, it is also the precision of times +// when serializing. +// +// For backwards compatibility the default precision is set to seconds, so that +// no fractional timestamps are generated. +var TimePrecision = time.Second + +// MarshalSingleStringAsArray modifies the behaviour of the ClaimStrings type, especially +// its MarshalJSON function. +// +// If it is set to true (the default), it will always serialize the type as an +// array of strings, even if it just contains one element, defaulting to the behaviour +// of the underlying []string. If it is set to false, it will serialize to a single +// string, if it contains one element. Otherwise, it will serialize to an array of strings. +var MarshalSingleStringAsArray = true + +// NumericDate represents a JSON numeric date value, as referenced at +// https://datatracker.ietf.org/doc/html/rfc7519#section-2. +type NumericDate struct { + time.Time +} + +// NewNumericDate constructs a new *NumericDate from a standard library time.Time struct. +// It will truncate the timestamp according to the precision specified in TimePrecision. +func NewNumericDate(t time.Time) *NumericDate { + return &NumericDate{t.Truncate(TimePrecision)} +} + +// newNumericDateFromSeconds creates a new *NumericDate out of a float64 representing a +// UNIX epoch with the float fraction representing non-integer seconds. +func newNumericDateFromSeconds(f float64) *NumericDate { + return NewNumericDate(time.Unix(0, int64(f*float64(time.Second)))) +} + +// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch +// represented in NumericDate to a byte array, using the precision specified in TimePrecision. 
+func (date NumericDate) MarshalJSON() (b []byte, err error) { + f := float64(date.Truncate(TimePrecision).UnixNano()) / float64(time.Second) + + return []byte(strconv.FormatFloat(f, 'f', -1, 64)), nil +} + +// UnmarshalJSON is an implementation of the json.RawMessage interface and deserializses a +// NumericDate from a JSON representation, i.e. a json.Number. This number represents an UNIX epoch +// with either integer or non-integer seconds. +func (date *NumericDate) UnmarshalJSON(b []byte) (err error) { + var ( + number json.Number + f float64 + ) + + if err = json.Unmarshal(b, &number); err != nil { + return fmt.Errorf("could not parse NumericData: %w", err) + } + + if f, err = number.Float64(); err != nil { + return fmt.Errorf("could not convert json number value to float: %w", err) + } + + n := newNumericDateFromSeconds(f) + *date = *n + + return nil +} + +// ClaimStrings is basically just a slice of strings, but it can be either serialized from a string array or just a string. +// This type is necessary, since the "aud" claim can either be a single string or an array. +type ClaimStrings []string + +func (s *ClaimStrings) UnmarshalJSON(data []byte) (err error) { + var value interface{} + + if err = json.Unmarshal(data, &value); err != nil { + return err + } + + var aud []string + + switch v := value.(type) { + case string: + aud = append(aud, v) + case []string: + aud = ClaimStrings(v) + case []interface{}: + for _, vv := range v { + vs, ok := vv.(string) + if !ok { + return &json.UnsupportedTypeError{Type: reflect.TypeOf(vv)} + } + aud = append(aud, vs) + } + case nil: + return nil + default: + return &json.UnsupportedTypeError{Type: reflect.TypeOf(v)} + } + + *s = aud + + return +} + +func (s ClaimStrings) MarshalJSON() (b []byte, err error) { + // This handles a special case in the JWT RFC. If the string array, e.g. used by the "aud" field, + // only contains one element, it MAY be serialized as a single string. 
This may or may not be + // desired based on the ecosystem of other JWT library used, so we make it configurable by the + // variable MarshalSingleStringAsArray. + if len(s) == 1 && !MarshalSingleStringAsArray { + return json.Marshal(s[0]) + } + + return json.Marshal([]string(s)) +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go b/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go new file mode 100644 index 0000000000000..cefcf5ce1b982 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/remotecache/gha/gha.go @@ -0,0 +1,388 @@ +package gha + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "os" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/moby/buildkit/cache/remotecache" + v1 "github.com/moby/buildkit/cache/remotecache/v1" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/tracing" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + actionscache "github.com/tonistiigi/go-actions-cache" + "golang.org/x/sync/errgroup" +) + +func init() { + actionscache.Log = logrus.Debugf +} + +const ( + attrScope = "scope" + attrToken = "token" + attrURL = "url" + version = "1" +) + +type Config struct { + Scope string + URL string + Token string +} + +func getConfig(attrs map[string]string) (*Config, error) { + scope, ok := attrs[attrScope] + if !ok { + scope = "buildkit" + } + url, ok := attrs[attrURL] + if !ok { + return nil, errors.Errorf("url not set for github actions cache") + } + token, ok := attrs[attrToken] + if !ok { + return nil, errors.Errorf("token not set for github actions cache") + } + return &Config{ + Scope: scope, + URL: url, + Token: token, + }, nil +} + +// ResolveCacheExporterFunc 
for Github actions cache exporter. +func ResolveCacheExporterFunc() remotecache.ResolveCacheExporterFunc { + return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) { + cfg, err := getConfig(attrs) + if err != nil { + return nil, err + } + return NewExporter(cfg) + } +} + +type exporter struct { + solver.CacheExporterTarget + chains *v1.CacheChains + cache *actionscache.Cache + config *Config +} + +func NewExporter(c *Config) (remotecache.Exporter, error) { + cc := v1.NewCacheChains() + cache, err := actionscache.New(c.Token, c.URL, actionscache.Opt{Client: tracing.DefaultClient}) + if err != nil { + return nil, err + } + return &exporter{CacheExporterTarget: cc, chains: cc, cache: cache, config: c}, nil +} + +func (ce *exporter) Config() remotecache.Config { + return remotecache.Config{ + Compression: compression.New(compression.Default), + } +} + +func (ce *exporter) blobKey(dgst digest.Digest) string { + return "buildkit-blob-" + version + "-" + dgst.String() +} + +func (ce *exporter) indexKey() string { + scope := "" + for _, s := range ce.cache.Scopes() { + if s.Permission&actionscache.PermissionWrite != 0 { + scope = s.Scope + } + } + scope = digest.FromBytes([]byte(scope)).Hex()[:8] + return "index-" + ce.config.Scope + "-" + version + "-" + scope +} + +func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) { + // res := make(map[string]string) + config, descs, err := ce.chains.Marshal(ctx) + if err != nil { + return nil, err + } + + // TODO: push parallel + for i, l := range config.Layers { + dgstPair, ok := descs[l.Blob] + if !ok { + return nil, errors.Errorf("missing blob %s", l.Blob) + } + if dgstPair.Descriptor.Annotations == nil { + return nil, errors.Errorf("invalid descriptor without annotations") + } + var diffID digest.Digest + v, ok := dgstPair.Descriptor.Annotations["containerd.io/uncompressed"] + if !ok { + return nil, errors.Errorf("invalid descriptor without uncompressed 
annotation") + } + dgst, err := digest.Parse(v) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse uncompressed annotation") + } + diffID = dgst + + key := ce.blobKey(dgstPair.Descriptor.Digest) + b, err := ce.cache.Load(ctx, key) + if err != nil { + return nil, err + } + if b == nil { + layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob)) + ra, err := dgstPair.Provider.ReaderAt(ctx, dgstPair.Descriptor) + if err != nil { + return nil, layerDone(err) + } + if err := ce.cache.Save(ctx, key, ra); err != nil { + if !errors.Is(err, os.ErrExist) { + return nil, layerDone(errors.Wrap(err, "error writing layer blob")) + } + } + layerDone(nil) + } + la := &v1.LayerAnnotations{ + DiffID: diffID, + Size: dgstPair.Descriptor.Size, + MediaType: dgstPair.Descriptor.MediaType, + } + if v, ok := dgstPair.Descriptor.Annotations["buildkit/createdat"]; ok { + var t time.Time + if err := (&t).UnmarshalText([]byte(v)); err != nil { + return nil, err + } + la.CreatedAt = t.UTC() + } + config.Layers[i].Annotations = la + } + + dt, err := json.Marshal(config) + if err != nil { + return nil, err + } + + if err := ce.cache.SaveMutable(ctx, ce.indexKey(), 15*time.Second, func(old *actionscache.Entry) (actionscache.Blob, error) { + return actionscache.NewBlob(dt), nil + }); err != nil { + return nil, err + } + + return nil, nil +} + +// ResolveCacheImporterFunc for Github actions cache importer. 
+func ResolveCacheImporterFunc() remotecache.ResolveCacheImporterFunc { + return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Importer, ocispecs.Descriptor, error) { + cfg, err := getConfig(attrs) + if err != nil { + return nil, ocispecs.Descriptor{}, err + } + i, err := NewImporter(cfg) + if err != nil { + return nil, ocispecs.Descriptor{}, err + } + return i, ocispecs.Descriptor{}, nil + } +} + +type importer struct { + cache *actionscache.Cache + config *Config +} + +func NewImporter(c *Config) (remotecache.Importer, error) { + cache, err := actionscache.New(c.Token, c.URL, actionscache.Opt{Client: tracing.DefaultClient}) + if err != nil { + return nil, err + } + return &importer{cache: cache, config: c}, nil +} + +func (ci *importer) makeDescriptorProviderPair(l v1.CacheLayer) (*v1.DescriptorProviderPair, error) { + if l.Annotations == nil { + return nil, errors.Errorf("cache layer with missing annotations") + } + annotations := map[string]string{} + if l.Annotations.DiffID == "" { + return nil, errors.Errorf("cache layer with missing diffid") + } + annotations["containerd.io/uncompressed"] = l.Annotations.DiffID.String() + if !l.Annotations.CreatedAt.IsZero() { + txt, err := l.Annotations.CreatedAt.MarshalText() + if err != nil { + return nil, err + } + annotations["buildkit/createdat"] = string(txt) + } + desc := ocispecs.Descriptor{ + MediaType: l.Annotations.MediaType, + Digest: l.Blob, + Size: l.Annotations.Size, + Annotations: annotations, + } + return &v1.DescriptorProviderPair{ + Descriptor: desc, + Provider: &ciProvider{desc: desc, ci: ci}, + }, nil +} + +func (ci *importer) loadScope(ctx context.Context, scope string) (*v1.CacheChains, error) { + scope = digest.FromBytes([]byte(scope)).Hex()[:8] + key := "index-" + ci.config.Scope + "-" + version + "-" + scope + + entry, err := ci.cache.Load(ctx, key) + if err != nil { + return nil, err + } + if entry == nil { + return v1.NewCacheChains(), nil + } + + // TODO: this 
buffer can be removed + buf := &bytes.Buffer{} + if err := entry.WriteTo(ctx, buf); err != nil { + return nil, err + } + + var config v1.CacheConfig + if err := json.Unmarshal(buf.Bytes(), &config); err != nil { + return nil, errors.WithStack(err) + } + + allLayers := v1.DescriptorProvider{} + + for _, l := range config.Layers { + dpp, err := ci.makeDescriptorProviderPair(l) + if err != nil { + return nil, err + } + allLayers[l.Blob] = *dpp + } + + cc := v1.NewCacheChains() + if err := v1.ParseConfig(config, allLayers, cc); err != nil { + return nil, err + } + return cc, nil +} + +func (ci *importer) Resolve(ctx context.Context, _ ocispecs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) { + eg, ctx := errgroup.WithContext(ctx) + ccs := make([]*v1.CacheChains, len(ci.cache.Scopes())) + + for i, s := range ci.cache.Scopes() { + func(i int, scope string) { + eg.Go(func() error { + cc, err := ci.loadScope(ctx, scope) + if err != nil { + return err + } + ccs[i] = cc + return nil + }) + }(i, s.Scope) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + cms := make([]solver.CacheManager, 0, len(ccs)) + + for _, cc := range ccs { + keysStorage, resultStorage, err := v1.NewCacheKeyStorage(cc, w) + if err != nil { + return nil, err + } + cms = append(cms, solver.NewCacheManager(ctx, id, keysStorage, resultStorage)) + } + + return solver.NewCombinedCacheManager(cms, nil), nil +} + +type ciProvider struct { + ci *importer + desc ocispecs.Descriptor + mu sync.Mutex + entries map[digest.Digest]*actionscache.Entry +} + +func (p *ciProvider) CheckDescriptor(ctx context.Context, desc ocispecs.Descriptor) error { + if desc.Digest != p.desc.Digest { + return nil + } + + _, err := p.loadEntry(ctx, desc) + return err +} + +func (p *ciProvider) loadEntry(ctx context.Context, desc ocispecs.Descriptor) (*actionscache.Entry, error) { + p.mu.Lock() + defer p.mu.Unlock() + + if ce, ok := p.entries[desc.Digest]; ok { + return ce, nil + } + key := 
"buildkit-blob-" + version + "-" + desc.Digest.String() + ce, err := p.ci.cache.Load(ctx, key) + if err != nil { + return nil, err + } + if ce == nil { + return nil, errors.Errorf("blob %s not found", desc.Digest) + } + if p.entries == nil { + p.entries = make(map[digest.Digest]*actionscache.Entry) + } + p.entries[desc.Digest] = ce + return ce, nil +} + +func (p *ciProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { + ce, err := p.loadEntry(ctx, desc) + if err != nil { + return nil, err + } + rac := ce.Download(context.TODO()) + return &readerAt{ReaderAtCloser: rac, desc: desc}, nil +} + +type readerAt struct { + actionscache.ReaderAtCloser + desc ocispecs.Descriptor +} + +func (r *readerAt) Size() int64 { + return r.desc.Size +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.NewFromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} diff --git a/vendor/github.com/tonistiigi/go-actions-cache/LICENSE b/vendor/github.com/tonistiigi/go-actions-cache/LICENSE new file mode 100644 index 0000000000000..d79d5687d0b7f --- /dev/null +++ b/vendor/github.com/tonistiigi/go-actions-cache/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Tõnis Tiigi + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the 
Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/tonistiigi/go-actions-cache/api.md b/vendor/github.com/tonistiigi/go-actions-cache/api.md new file mode 100644 index 0000000000000..6245d2c06c2a6 --- /dev/null +++ b/vendor/github.com/tonistiigi/go-actions-cache/api.md @@ -0,0 +1,66 @@ +# Github Actions Cache service API + +User docs: https://docs.github.com/en/actions/guides/caching-dependencies-to-speed-up-workflows + +API captured from: https://github.com/actions/toolkit/tree/main/packages/cache + +## Authentication + +Actions have access to two special environment variables `ACTIONS_CACHE_URL` and `ACTIONS_RUNTIME_TOKEN`. Inline step scripts in workflows do not see these variables. [`crazy-max/ghaction-github-runtime@v1`](https://github.com/crazy-max/ghaction-github-runtime) action can be used as a workaround if needed to expose them. + +The base URL for cache API is `$ACTIONS_CACHE_URL/_apis/artifactcache/`. + +`ACTIONS_RUNTIME_TOKEN` is a JWT token valid for 6h. Token is associated with repository scopes that can be readwrite or readonly. Eg. a PR has write access to its own scope but readonly access to the target branch scope. + +All requests need to be authenticated with `Authorization: Bearer $ACTIONS_RUNTIME_TOKEN` . + +## Query cache + +### `GET /cache` + +#### Query parameters: + +- `keys` - comma-separated list of keys to query. Keys can be queried by prefix and do not need to match exactly. The newest record matching a prefix is returned. +- `version` - unique value that provides namespacing. 
The same value needs to be used on saving cache. The actual value does not seem to be significant. + + +#### Response + +On success returns JSON object with following properties: + +- `cacheKey` - full cache key used on saving (not prefix that was used in request) +- `scope` - which scope cache object belongs to +- `archiveLocation` - URL to download blob. This URL is already authenticated and does not need extra authentication with the token. + +## Save cache + +### `POST /caches` + +Reserves a cache key and returns ID (incrementing number) that can be used for uploading cache. Once a key has been reserved, there is no way to save any other data to the same key. Subsequent requests with the same key/version will receive "already exists" error. There does not seem to be a way to discard partial save on error as well that may be problematic with crashes. + +#### Request JSON object: + +- `key` - Key to reserve. A prefix of this is used on query. +- `version` - Namespace that needs to match version on cache query. + +#### Response JSON object: + +- `cacheID` - Numeric unique ID used in next requests. + + +### `PATCH /caches/[cacheID]` + +Uploads a chunk of data to the specified cache record. `Content-Range` headers are used to specify what range of data is being uploaded. + +Request body is `application/octet-stream` raw data. Successful response is empty. + +### `POST /caches/[cacheID]` + +Finalizes the cache record after all data has been uploaded with `PATCH` requests. After calling this method, data becomes available for loading. + +#### Request JSON object: + +- `size` - Total size of the object. Needs to match with the data that was uploaded. + +Successful respone is empty. 
+ diff --git a/vendor/github.com/tonistiigi/go-actions-cache/cache.go b/vendor/github.com/tonistiigi/go-actions-cache/cache.go new file mode 100644 index 0000000000000..3a0f4b1f80cb4 --- /dev/null +++ b/vendor/github.com/tonistiigi/go-actions-cache/cache.go @@ -0,0 +1,645 @@ +package actionscache + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "os/exec" + "strconv" + "strings" + "sync" + "time" + + "github.com/dimchansky/utfbom" + jwt "github.com/golang-jwt/jwt/v4" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +var UploadConcurrency = 4 +var UploadChunkSize = 32 * 1024 * 1024 +var noValidateToken bool + +var Log = func(string, ...interface{}) {} + +type Blob interface { + io.ReaderAt + io.Closer + Size() int64 +} + +type bufferBlob struct { + io.ReaderAt + size int64 +} + +func (b *bufferBlob) Size() int64 { + return b.size +} + +func (b *bufferBlob) Close() error { + return nil +} + +func NewBlob(dt []byte) Blob { + return &bufferBlob{ + ReaderAt: bytes.NewReader(dt), + size: int64(len(dt)), + } +} + +func TryEnv(opt Opt) (*Cache, error) { + tokenEnc, ok := os.LookupEnv("GHCACHE_TOKEN_ENC") + if ok { + url, token, err := decryptToken(tokenEnc, os.Getenv("GHCACHE_TOKEN_PW")) + if err != nil { + return nil, err + } + return New(token, url, opt) + } + + token, ok := os.LookupEnv("ACTIONS_RUNTIME_TOKEN") + if !ok { + return nil, nil + } + + // ACTIONS_CACHE_URL=https://artifactcache.actions.githubusercontent.com/xxx/ + cacheURL, ok := os.LookupEnv("ACTIONS_CACHE_URL") + if !ok { + return nil, nil + } + + return New(token, cacheURL, opt) +} + +type Opt struct { + Client *http.Client + Timeout time.Duration + BackoffPool *BackoffPool +} + +func New(token, url string, opt Opt) (*Cache, error) { + tk, _, err := new(jwt.Parser).ParseUnverified(token, jwt.MapClaims{}) + if err != nil { + return nil, errors.WithStack(err) + } + claims, ok := 
tk.Claims.(jwt.MapClaims) + if !ok { + return nil, errors.Errorf("invalid token without claims map") + } + ac, ok := claims["ac"] + if !ok { + return nil, errors.Errorf("invalid token without access controls") + } + acs, ok := ac.(string) + if !ok { + return nil, errors.Errorf("invalid token with access controls type %T", ac) + } + + exp, ok := claims["exp"] + if !ok { + return nil, errors.Errorf("invalid token without expiration time") + } + expf, ok := exp.(float64) + if !ok { + return nil, errors.Errorf("invalid token with expiration time type %T", acs) + } + expt := time.Unix(int64(expf), 0) + + if !noValidateToken && time.Now().After(expt) { + return nil, errors.Errorf("cache token expired at %v", expt) + } + + nbf, ok := claims["nbf"] + if !ok { + return nil, errors.Errorf("invalid token without expiration time") + } + nbff, ok := nbf.(float64) + if !ok { + return nil, errors.Errorf("invalid token with expiration time type %T", nbf) + } + nbft := time.Unix(int64(nbff), 0) + + if !noValidateToken && time.Now().Before(nbft) { + return nil, errors.Errorf("invalid token with future issue time time %v", nbft) + } + + scopes := []Scope{} + if err := json.Unmarshal([]byte(acs), &scopes); err != nil { + return nil, errors.Wrap(err, "failed to parse token access controls") + } + Log("parsed token: scopes: %+v, issued: %v, expires: %v", scopes, nbft, expt) + + if opt.Client == nil { + opt.Client = http.DefaultClient + } + if opt.Timeout == 0 { + opt.Timeout = 5 * time.Minute + } + + if opt.BackoffPool == nil { + opt.BackoffPool = defaultBackoffPool + } + + return &Cache{ + opt: opt, + scopes: scopes, + URL: url, + Token: tk, + IssuedAt: nbft, + ExpiresAt: expt, + }, nil +} + +type Scope struct { + Scope string + Permission Permission +} + +type Permission int + +const ( + PermissionRead = 1 << iota + PermissionWrite +) + +func (p Permission) String() string { + out := make([]string, 0, 2) + if p&PermissionRead != 0 { + out = append(out, "Read") + } + if 
p&PermissionWrite != 0 { + out = append(out, "Write") + } + if p > PermissionRead|PermissionWrite { + return strconv.Itoa(int(p)) + } + return strings.Join(out, "|") +} + +type Cache struct { + opt Opt + scopes []Scope + URL string + Token *jwt.Token + IssuedAt time.Time + ExpiresAt time.Time +} + +func (c *Cache) Scopes() []Scope { + return c.scopes +} + +func (c *Cache) Load(ctx context.Context, keys ...string) (*Entry, error) { + u, err := url.Parse(c.url("cache")) + if err != nil { + return nil, err + } + q := u.Query() + q.Set("keys", strings.Join(keys, ",")) + q.Set("version", version(keys[0])) + u.RawQuery = q.Encode() + + req := c.newRequest("GET", u.String(), nil) + Log("load cache %s", u.String()) + resp, err := c.doWithRetries(ctx, req) + if err != nil { + return nil, errors.WithStack(err) + } + var ce Entry + dt, err := ioutil.ReadAll(io.LimitReader(resp.Body, 32*1024)) + if err != nil { + return nil, errors.WithStack(err) + } + if len(dt) == 0 { + return nil, nil + } + if err := json.Unmarshal(dt, &ce); err != nil { + return nil, errors.WithStack(err) + } + ce.client = c.opt.Client + if ce.Key == "" { + return nil, nil + } + return &ce, nil +} + +func (c *Cache) reserve(ctx context.Context, key string) (int, error) { + dt, err := json.Marshal(ReserveCacheReq{Key: key, Version: version(key)}) + if err != nil { + return 0, errors.WithStack(err) + } + req := c.newRequest("POST", c.url("caches"), func() io.Reader { + return bytes.NewReader(dt) + }) + + req.headers["Content-Type"] = "application/json" + Log("save cache req %s body=%s", req.url, dt) + resp, err := c.doWithRetries(ctx, req) + if err != nil { + return 0, errors.WithStack(err) + } + + dt, err = ioutil.ReadAll(io.LimitReader(resp.Body, 32*1024)) + if err != nil { + return 0, errors.WithStack(err) + } + var cr ReserveCacheResp + if err := json.Unmarshal(dt, &cr); err != nil { + return 0, errors.Wrapf(err, "failed to unmarshal %s", dt) + } + if cr.CacheID == 0 { + return 0, errors.Errorf("invalid 
response %s", dt) + } + Log("save cache resp: %s", dt) + return cr.CacheID, nil +} + +func (c *Cache) commit(ctx context.Context, id int, size int64) error { + dt, err := json.Marshal(CommitCacheReq{Size: size}) + if err != nil { + return errors.WithStack(err) + } + req := c.newRequest("POST", c.url(fmt.Sprintf("caches/%d", id)), func() io.Reader { + return bytes.NewReader(dt) + }) + req.headers["Content-Type"] = "application/json" + Log("commit cache %s, size %d", req.url, size) + resp, err := c.doWithRetries(ctx, req) + if err != nil { + return errors.Wrapf(err, "error committing cache %d", id) + } + dt, err = ioutil.ReadAll(io.LimitReader(resp.Body, 32*1024)) + if err != nil { + return err + } + if len(dt) != 0 { + Log("commit response: %s", dt) + } + return resp.Body.Close() +} + +func (c *Cache) upload(ctx context.Context, id int, b Blob) error { + var mu sync.Mutex + eg, ctx := errgroup.WithContext(ctx) + offset := int64(0) + for i := 0; i < UploadConcurrency; i++ { + eg.Go(func() error { + for { + mu.Lock() + start := offset + if start >= b.Size() { + mu.Unlock() + return nil + } + end := start + int64(UploadChunkSize) + if end > b.Size() { + end = b.Size() + } + offset = end + mu.Unlock() + + if err := c.uploadChunk(ctx, id, b, start, end-start); err != nil { + return err + } + } + }) + } + return eg.Wait() +} + +func (c *Cache) Save(ctx context.Context, key string, b Blob) error { + id, err := c.reserve(ctx, key) + if err != nil { + return err + } + + if err := c.upload(ctx, id, b); err != nil { + return err + } + + return c.commit(ctx, id, b.Size()) +} + +// SaveMutable stores a blob over a possibly existing key. Previous value is passed to callback +// that needs to return new blob. Callback may be called multiple times if two saves happen during +// same time window. In case of a crash a key may remain locked, preventing previous changes. Timeout +// can be set to force changes in this case without guaranteeing that previous value was up to date. 
+func (c *Cache) SaveMutable(ctx context.Context, key string, forceTimeout time.Duration, f func(old *Entry) (Blob, error)) error { + var blocked time.Duration +loop0: + for { + ce, err := c.Load(ctx, key+"#") + if err != nil { + return err + } + b, err := f(ce) + if err != nil { + return err + } + defer b.Close() + if ce != nil { + // check if index changed while loading + ce2, err := c.Load(ctx, key+"#") + if err != nil { + return err + } + if ce2 == nil || ce2.Key != ce.Key { + continue + } + } + idx := 0 + if ce != nil { + idxs := strings.TrimPrefix(ce.Key, key+"#") + if idxs == "" { + return errors.Errorf("corrupt empty index for %s", key) + } + idx, err = strconv.Atoi(idxs) + if err != nil { + return errors.Wrapf(err, "failed to parse %s index", key) + } + } + var cacheID int + for { + idx++ + cacheID, err = c.reserve(ctx, fmt.Sprintf("%s#%d", key, idx)) + if err != nil { + if errors.Is(err, os.ErrExist) { + if blocked <= forceTimeout { + blocked += 2 * time.Second + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(2 * time.Second): + } + continue loop0 + } + continue // index has been blocked a long time, maybe crashed, skip to next number + } + return err + } + break + } + if err := c.upload(ctx, cacheID, b); err != nil { + return nil + } + return c.commit(ctx, cacheID, b.Size()) + } +} + +func (c *Cache) uploadChunk(ctx context.Context, id int, ra io.ReaderAt, off, n int64) error { + req := c.newRequest("PATCH", c.url(fmt.Sprintf("caches/%d", id)), func() io.Reader { + return io.NewSectionReader(ra, off, n) + }) + req.headers["Content-Type"] = "application/octet-stream" + req.headers["Content-Range"] = fmt.Sprintf("bytes %d-%d/*", off, off+n-1) + + Log("upload cache chunk %s, range %d-%d", req.url, off, off+n-1) + resp, err := c.doWithRetries(ctx, req) + if err != nil { + return errors.WithStack(err) + } + dt, err := ioutil.ReadAll(io.LimitReader(resp.Body, 32*1024)) + if err != nil { + return errors.WithStack(err) + } + if len(dt) != 0 
{ + Log("upload chunk resp: %s", dt) + } + return resp.Body.Close() +} + +func (c *Cache) newRequest(method, url string, body func() io.Reader) *request { + return &request{ + method: method, + url: url, + body: body, + headers: map[string]string{ + "Authorization": "Bearer " + c.Token.Raw, + "Accept": "application/json;api-version=6.0-preview.1", + }, + } +} + +func (c *Cache) doWithRetries(ctx context.Context, r *request) (*http.Response, error) { + var err error + max := time.Now().Add(c.opt.Timeout) + for { + if err1 := c.opt.BackoffPool.Wait(ctx, time.Until(max)); err1 != nil { + if err != nil { + return nil, errors.Wrapf(err, "%v", err1) + } + return nil, err1 + } + req, err := r.httpReq() + if err != nil { + return nil, err + } + req = req.WithContext(ctx) + + var resp *http.Response + resp, err = c.opt.Client.Do(req) + if err != nil { + return nil, errors.WithStack(err) + } + if err := checkResponse(resp); err != nil { + var he HTTPError + if errors.As(err, &he) { + if he.StatusCode == http.StatusTooManyRequests { + c.opt.BackoffPool.Delay() + continue + } + } + c.opt.BackoffPool.Reset() + return nil, err + } + c.opt.BackoffPool.Reset() + return resp, nil + } +} + +func (c *Cache) url(p string) string { + return c.URL + "_apis/artifactcache/" + p +} + +type ReserveCacheReq struct { + Key string `json:"key"` + Version string `json:"version"` +} + +type ReserveCacheResp struct { + CacheID int `json:"cacheID"` +} + +type CommitCacheReq struct { + Size int64 `json:"size"` +} + +type Entry struct { + Key string `json:"cacheKey"` + Scope string `json:"scope"` + URL string `json:"archiveLocation"` + + client *http.Client +} + +func (ce *Entry) WriteTo(ctx context.Context, w io.Writer) error { + rac := ce.Download(ctx) + if _, err := io.Copy(w, &rc{ReaderAt: rac}); err != nil { + return err + } + return rac.Close() +} + +// Download returns a ReaderAtCloser for pulling the data. 
Concurrent reads are not allowed +func (ce *Entry) Download(ctx context.Context) ReaderAtCloser { + return toReaderAtCloser(func(offset int64) (io.ReadCloser, error) { + req, err := http.NewRequest("GET", ce.URL, nil) + if err != nil { + return nil, errors.WithStack(err) + } + req = req.WithContext(ctx) + if offset != 0 { + req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } + client := ce.client + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req) + if err != nil { + return nil, errors.WithStack(err) + } + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + if resp.StatusCode == http.StatusRequestedRangeNotSatisfiable { + return nil, errors.Errorf("invalid status response %v for %s, range: %v", resp.Status, ce.URL, req.Header.Get("Range")) + } + return nil, errors.Errorf("invalid status response %v for %s", resp.Status, ce.URL) + } + if offset != 0 { + cr := resp.Header.Get("content-range") + if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) { + resp.Body.Close() + return nil, errors.Errorf("unhandled content range in response: %v", cr) + } + } + return resp.Body, nil + }) +} + +type request struct { + method string + url string + body func() io.Reader + headers map[string]string +} + +func (r *request) httpReq() (*http.Request, error) { + var body io.Reader + if r.body != nil { + body = r.body() + } + req, err := http.NewRequest(r.method, r.url, body) + if err != nil { + return nil, err + } + for k, v := range r.headers { + req.Header.Add(k, v) + } + return req, nil +} + +func version(k string) string { + h := sha256.New() + // h.Write([]byte(k)) + // upstream uses paths in version, we don't seem to have anything that is unique like this + h.Write([]byte("|go-actionscache-1.0")) + return hex.EncodeToString(h.Sum(nil)) +} + +type GithubAPIError struct { + Message string `json:"message"` + TypeName string `json:"typeName"` + TypeKey string `json:"typeKey"` + ErrorCode int `json:"errorCode"` +} + +func (e 
GithubAPIError) Error() string { + return e.Message +} + +func (e GithubAPIError) Is(err error) bool { + if err == os.ErrExist { + if strings.Contains(e.TypeKey, "AlreadyExists") { + return true + } + // for safety, in case error gets updated + if strings.Contains(strings.ToLower(e.Message), "already exists") { + return true + } + } + return false +} + +type HTTPError struct { + StatusCode int + Err error +} + +func (e HTTPError) Error() string { + return e.Err.Error() +} + +func (e HTTPError) Unwrap() error { + return e.Err +} + +func checkResponse(resp *http.Response) error { + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return nil + } + dt, err := ioutil.ReadAll(utfbom.SkipOnly(io.LimitReader(resp.Body, 32*1024))) + if err != nil { + return errors.WithStack(err) + } + var gae GithubAPIError + if err1 := json.Unmarshal(dt, &gae); err1 != nil { + err = errors.Wrapf(err1, "failed to parse error response %d: %s", resp.StatusCode, dt) + } else if gae.Message != "" { + err = errors.WithStack(gae) + } else { + err = errors.Errorf("unknown error %s: %s", resp.Status, dt) + } + + return HTTPError{ + StatusCode: resp.StatusCode, + Err: err, + } +} + +func decryptToken(enc, pass string) (string, string, error) { + // openssl key derivation uses some non-standard algorithm so exec instead of using go libraries + // this is only used on testing anyway + cmd := exec.Command("openssl", "enc", "-d", "-aes-256-cbc", "-a", "-A", "-salt", "-md", "sha256", "-pass", "env:GHCACHE_TOKEN_PW") + cmd.Env = append(cmd.Env, fmt.Sprintf("GHCACHE_TOKEN_PW=%s", pass)) + cmd.Stdin = bytes.NewReader([]byte(enc)) + buf := &bytes.Buffer{} + cmd.Stdout = buf + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + return "", "", err + } + parts := bytes.SplitN(buf.Bytes(), []byte(":::"), 2) + if len(parts) != 2 { + return "", "", errors.Errorf("invalid decrypt contents %s", buf.String()) + } + return string(parts[0]), strings.TrimSpace(string(parts[1])), nil +} diff --git 
a/vendor/github.com/tonistiigi/go-actions-cache/readerat.go b/vendor/github.com/tonistiigi/go-actions-cache/readerat.go new file mode 100644 index 0000000000000..566db7ceebecf --- /dev/null +++ b/vendor/github.com/tonistiigi/go-actions-cache/readerat.go @@ -0,0 +1,89 @@ +package actionscache + +import ( + "io" +) + +type ReaderAtCloser interface { + io.ReaderAt + io.Closer +} + +type readerAtCloser struct { + offset int64 + rc io.ReadCloser + ra io.ReaderAt + open func(offset int64) (io.ReadCloser, error) + closed bool +} + +func toReaderAtCloser(open func(offset int64) (io.ReadCloser, error)) ReaderAtCloser { + return &readerAtCloser{ + open: open, + } +} + +func (hrs *readerAtCloser) ReadAt(p []byte, off int64) (n int, err error) { + if hrs.closed { + return 0, io.EOF + } + + if hrs.ra != nil { + return hrs.ra.ReadAt(p, off) + } + + if hrs.rc == nil || off != hrs.offset { + if hrs.rc != nil { + hrs.rc.Close() + hrs.rc = nil + } + rc, err := hrs.open(off) + if err != nil { + return 0, err + } + hrs.rc = rc + } + if ra, ok := hrs.rc.(io.ReaderAt); ok { + hrs.ra = ra + n, err = ra.ReadAt(p, off) + } else { + for { + var nn int + nn, err = hrs.rc.Read(p) + n += nn + p = p[nn:] + if nn == len(p) || err != nil { + break + } + } + } + + hrs.offset += int64(n) + return +} + +func (hrs *readerAtCloser) Close() error { + if hrs.closed { + return nil + } + hrs.closed = true + if hrs.rc != nil { + return hrs.rc.Close() + } + + return nil +} + +type rc struct { + io.ReaderAt + offset int +} + +func (r *rc) Read(b []byte) (int, error) { + n, err := r.ReadAt(b, int64(r.offset)) + r.offset += n + if n > 0 && err == io.EOF { + err = nil + } + return n, err +} diff --git a/vendor/github.com/tonistiigi/go-actions-cache/retry.go b/vendor/github.com/tonistiigi/go-actions-cache/retry.go new file mode 100644 index 0000000000000..9487048f88e85 --- /dev/null +++ b/vendor/github.com/tonistiigi/go-actions-cache/retry.go @@ -0,0 +1,108 @@ +package actionscache + +import ( + "context" + 
"sync" + "time" + + "github.com/pkg/errors" +) + +const maxBackoff = time.Second * 90 +const minBackoff = time.Second * 1 + +var defaultBackoffPool = &BackoffPool{} + +type BackoffPool struct { + mu sync.Mutex + queue []chan struct{} + timer *time.Timer + backoff time.Duration + target time.Time +} + +func (b *BackoffPool) Wait(ctx context.Context, timeout time.Duration) error { + b.mu.Lock() + if b.timer == nil { + b.mu.Unlock() + return nil + } + + done := make(chan struct{}) + b.queue = append(b.queue, done) + + b.mu.Unlock() + + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + return nil + case <-time.After(timeout): + return errors.Errorf("maximum timeout reached") + } +} + +func (b *BackoffPool) Reset() { + b.mu.Lock() + b.reset() + b.backoff = 0 + b.mu.Unlock() +} +func (b *BackoffPool) reset() { + for _, done := range b.queue { + close(done) + } + b.queue = nil + if b.timer != nil { + b.timer.Stop() + b.timer = nil + } +} + +func (b *BackoffPool) trigger(t *time.Timer) { + b.mu.Lock() + if b.timer != t { + // this timer is not the current one + b.mu.Unlock() + return + } + + b.reset() + b.backoff = b.backoff * 2 + if b.backoff > maxBackoff { + b.backoff = maxBackoff + } + b.mu.Unlock() +} + +func (b *BackoffPool) Delay() { + b.mu.Lock() + if b.timer != nil { + minTime := time.Now().Add(minBackoff) + if b.target.Before(minTime) { + b.target = minTime + b.timer.Stop() + b.setupTimer() + } + b.mu.Unlock() + return + } + + if b.backoff == 0 { + b.backoff = minBackoff + } + + b.target = time.Now().Add(b.backoff) + b.setupTimer() + + b.mu.Unlock() +} + +func (b *BackoffPool) setupTimer() { + var t *time.Timer + b.timer = time.AfterFunc(time.Until(b.target), func() { + b.trigger(t) + }) + t = b.timer +} diff --git a/vendor/modules.txt b/vendor/modules.txt index ed51088e0f837..45fce02170c4d 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -296,6 +296,9 @@ github.com/cyphar/filepath-securejoin # github.com/deckarep/golang-set 
v0.0.0-20141123011944-ef32fa3046d9 ## explicit github.com/deckarep/golang-set +# github.com/dimchansky/utfbom v1.1.1 +## explicit +github.com/dimchansky/utfbom # github.com/docker/distribution v2.8.1+incompatible ## explicit github.com/docker/distribution @@ -375,6 +378,9 @@ github.com/gogo/protobuf/proto github.com/gogo/protobuf/protoc-gen-gogo/descriptor github.com/gogo/protobuf/sortkeys github.com/gogo/protobuf/types +# github.com/golang-jwt/jwt/v4 v4.1.0 +## explicit; go 1.15 +github.com/golang-jwt/jwt/v4 # github.com/golang/gddo v0.0.0-20190904175337-72a348e765d2 ## explicit github.com/golang/gddo/httputil @@ -507,6 +513,7 @@ github.com/moby/buildkit/cache/config github.com/moby/buildkit/cache/contenthash github.com/moby/buildkit/cache/metadata github.com/moby/buildkit/cache/remotecache +github.com/moby/buildkit/cache/remotecache/gha github.com/moby/buildkit/cache/remotecache/inline github.com/moby/buildkit/cache/remotecache/local github.com/moby/buildkit/cache/remotecache/registry @@ -793,6 +800,9 @@ github.com/tinylib/msgp/msgp github.com/tonistiigi/fsutil github.com/tonistiigi/fsutil/copy github.com/tonistiigi/fsutil/types +# github.com/tonistiigi/go-actions-cache v0.0.0-20220404170428-0bdeb6e1eac7 +## explicit; go 1.16 +github.com/tonistiigi/go-actions-cache # github.com/tonistiigi/go-archvariant v1.0.0 ## explicit; go 1.17 github.com/tonistiigi/go-archvariant From e132b0decafaf33a9f984818e78f35bcb17389bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Tue, 16 Aug 2022 13:51:53 +0200 Subject: [PATCH 44/90] c8d/prune: Handle filters, don't delete used MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/image.go | 17 ++++ daemon/containerd/image_list.go | 29 ++++++ daemon/containerd/image_prune.go | 160 +++++++++++++++++++++++-------- daemon/containerd/service.go | 9 +- 4 files changed, 173 insertions(+), 42 deletions(-) diff --git 
a/daemon/containerd/image.go b/daemon/containerd/image.go index aade0e66db617..8832a5dbebefb 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -270,3 +270,20 @@ func (i *ImageService) resolveImageName(ctx context.Context, refOrID string) (oc return img.Target, namedRef, nil } + +// PresentChildrenHandler traverses recursively all children descriptors that are present in the store. +func (i *ImageService) presentChildrenHandler() containerdimages.HandlerFunc { + store := i.client.ContentStore() + + return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + _, err = store.ReaderAt(ctx, desc) + if err != nil { + if cerrdefs.IsNotFound(err) { + return nil, nil + } + return nil, err + } + + return containerdimages.Children(ctx, store, desc) + } +} diff --git a/daemon/containerd/image_list.go b/daemon/containerd/image_list.go index 0818d34533478..f1d80d15a6e5c 100644 --- a/daemon/containerd/image_list.go +++ b/daemon/containerd/image_list.go @@ -2,11 +2,13 @@ package containerd import ( "context" + "time" "github.com/containerd/containerd" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" ) @@ -159,11 +161,38 @@ func (i *ImageService) setupFilters(ctx context.Context, imageFilters filters.Ar return nil, err } + err = imageFilters.WalkValues("until", func(value string) error { + ts, err := timetypes.GetTimestamp(value, time.Now()) + if err != nil { + return err + } + seconds, nanoseconds, err := timetypes.ParseTimestamps(ts, 0) + if err != nil { + return err + } + until := time.Unix(seconds, nanoseconds) + + fltrs = append(fltrs, func(image containerd.Image) bool { + created := image.Metadata().CreatedAt + return created.Before(until) + }) + return err + }) + if err != nil { + return nil, 
err + } + if imageFilters.Contains("label") { fltrs = append(fltrs, func(image containerd.Image) bool { return imageFilters.MatchKVList("label", image.Labels()) }) } + if imageFilters.Contains("label!") { + fltrs = append(fltrs, func(image containerd.Image) bool { + return !imageFilters.MatchKVList("label!", image.Labels()) + }) + } + return func(image containerd.Image) bool { for _, filter := range fltrs { if !filter(image) { diff --git a/daemon/containerd/image_prune.go b/daemon/containerd/image_prune.go index 2a9bf4fdda874..ebeb967cf76e0 100644 --- a/daemon/containerd/image_prune.go +++ b/daemon/containerd/image_prune.go @@ -3,53 +3,138 @@ package containerd import ( "context" "fmt" + "sync/atomic" - "github.com/containerd/containerd/content" + "github.com/containerd/containerd" cerrdefs "github.com/containerd/containerd/errdefs" containerdimages "github.com/containerd/containerd/images" - "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/leases" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "github.com/opencontainers/go-digest" + "github.com/docker/docker/errdefs" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) +var imagesAcceptedFilters = map[string]bool{ + "dangling": true, + "label": true, + "label!": true, + "until": true, +} + +// errPruneRunning is returned when a prune request is received while +// one is in progress +var errPruneRunning = errdefs.Conflict(errors.New("a prune operation is already running")) + // ImagesPrune removes unused images -// TODO: handle pruneFilters func (i *ImageService) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (*types.ImagesPruneReport, error) { + if !atomic.CompareAndSwapInt32(&i.pruneRunning, 0, 1) { + return nil, errPruneRunning + } + defer atomic.StoreInt32(&i.pruneRunning, 0) + + err := pruneFilters.Validate(imagesAcceptedFilters) + if err != nil { + return nil, err + 
} + + danglingOnly := true + if pruneFilters.Contains("dangling") { + if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") { + danglingOnly = false + } else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") { + return nil, fmt.Errorf("invalid dangling filter value: %q", pruneFilters.Get("dangling")) + } + } + + filterFunc, err := i.setupFilters(ctx, pruneFilters) + if err != nil { + return nil, err + } + + if !danglingOnly { + r, errs := i.pruneUnused(ctx, filterFunc) + if len(errs) > 0 { + return &r, combineErrors(errs) + } + + return &r, nil + } else { + // In containerd dangling content is automatically deleted by the GC. + // So running prune with dangling=true is mostly a no-op, unless there + // was some action performed which didn't invoke the GC immediately. + report := types.ImagesPruneReport{} + // Trigger GC. + ls := i.client.LeasesService() + lease, err := ls.Create(ctx) + if err != nil { + return &report, err + } + err = ls.Delete(ctx, lease, leases.SynchronousDelete) + return &report, err + } +} + +func (i *ImageService) pruneUnused(ctx context.Context, filterFunc imageFilterFunc) (types.ImagesPruneReport, []error) { + report := types.ImagesPruneReport{} is := i.client.ImageService() store := i.client.ContentStore() - images, err := is.List(ctx) + allImages, err := i.client.ListImages(ctx) if err != nil { - return nil, errors.Wrapf(err, "Failed to list images") + return report, []error{err} + } + + imagesToPrune := map[string]containerd.Image{} + for _, img := range allImages { + imagesToPrune[img.Name()] = img } - platform := platforms.DefaultStrict() - report := types.ImagesPruneReport{} - toDelete := map[digest.Digest]uint64{} errs := []error{} - for _, img := range images { - err := getContentDigestsWithSizes(ctx, img, store, platform, toDelete) - if err != nil { - errs = append(errs, err) - continue + // Apply filters + for name, img := range imagesToPrune { + 
filteredOut := !filterFunc(img) + logrus.WithField("image", name).WithField("filteredOut", filteredOut).Debug("filtering image") + if filteredOut { + delete(imagesToPrune, name) } } - for digest, size := range toDelete { - report.SpaceReclaimed += size - report.ImagesDeleted = append(report.ImagesDeleted, - types.ImageDeleteResponseItem{ - Deleted: digest.String(), - }, - ) + cs := i.client.ContainerService() + containers, err := cs.List(ctx) + if err != nil { + return report, []error{err} + } + + // Exclude images that are used by existing containers from prune + for _, container := range containers { + logrus.WithField("container", container.ID).WithField("image", container.Image).Debug("filtering container's image") + if container.Image != "" { + delete(imagesToPrune, container.Image) + } } - for _, img := range images { - err = is.Delete(ctx, img.Name, containerdimages.SynchronousDelete()) + logrus.WithField("images", imagesToPrune).Debug("pruning") + + for _, img := range imagesToPrune { + blobs := []ocispec.Descriptor{} + + err = containerdimages.Walk(ctx, containerdimages.Handlers( + i.presentChildrenHandler(), + containerdimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + blobs = append(blobs, desc) + return nil, nil + }), + ), img.Target()) + + if err != nil { + errs = append(errs, err) + continue + } + err = is.Delete(ctx, img.Name(), containerdimages.SynchronousDelete()) if err != nil && !cerrdefs.IsNotFound(err) { errs = append(errs, err) continue @@ -57,26 +142,25 @@ func (i *ImageService) ImagesPrune(ctx context.Context, pruneFilters filters.Arg report.ImagesDeleted = append(report.ImagesDeleted, types.ImageDeleteResponseItem{ - Untagged: img.Name, + Untagged: img.Name(), }, ) - } - - if len(errs) > 0 { - return &report, combineErrors(errs) - } - return &report, nil -} + // Check which blobs have been deleted and sum their sizes + for _, blob := range blobs { + _, err := 
store.ReaderAt(ctx, blob) -func getContentDigestsWithSizes(ctx context.Context, img containerdimages.Image, store content.Store, platform platforms.MatchComparer, toDelete map[digest.Digest]uint64) error { - return containerdimages.Walk(ctx, containerdimages.Handlers(containerdimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { - if desc.Size < 0 { - return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType) + if cerrdefs.IsNotFound(err) { + report.ImagesDeleted = append(report.ImagesDeleted, + types.ImageDeleteResponseItem{ + Deleted: blob.Digest.String(), + }, + ) + report.SpaceReclaimed += uint64(blob.Size) + } } - toDelete[desc.Digest] = uint64(desc.Size) - return nil, nil - }), containerdimages.LimitManifests(containerdimages.FilterPlatforms(containerdimages.ChildrenHandler(store), platform), platform, 1)), img.Target) + } + return report, errs } func combineErrors(errs []error) error { diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index b19cc84b963af..952770f2e982e 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -21,10 +21,11 @@ import ( // ImageService implements daemon.ImageService type ImageService struct { - client *containerd.Client - usage singleflight.Group - containers container.Store - snapshotter string + client *containerd.Client + usage singleflight.Group + containers container.Store + snapshotter string + pruneRunning int32 } // NewService creates a new ImageService. From 8523afd36dd9e6a186004d1761997c7737f6f31b Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Wed, 17 Aug 2022 12:24:51 +0200 Subject: [PATCH 45/90] Create a lease for the snapshots we prepare With a lease we make sure that containerd won't garbage collect out snapshot. We remove the lease when the container is removed and leave it to containerd to clean things up. 
Signed-off-by: Djordje Lukic --- daemon/create.go | 11 +++++++++++ daemon/delete.go | 12 ++++++++++++ 2 files changed, 23 insertions(+) diff --git a/daemon/create.go b/daemon/create.go index f75bd0bc124c7..715f04dca1ec9 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -10,6 +10,7 @@ import ( "github.com/containerd/containerd" containerdimages "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" "github.com/containerd/containerd/platforms" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" @@ -193,6 +194,16 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai if _, err := s.Prepare(ctx, ctr.ID, parent); err != nil { return nil, err } + // Add a lease so that containerd doesn't garbage collect our snapshot + ls := daemon.containerdCli.LeasesService() + lease, err := ls.Create(ctx, leases.WithID(ctr.ID)) + if err != nil { + return nil, err + } + ls.AddResource(ctx, lease, leases.Resource{ + ID: ctr.ID, + Type: "snapshots/" + daemon.graphDriver, + }) } else { // Set RWLayer for container after mount labels have been set rwLayer, err := daemon.imageService.CreateLayer(ctr, setupInitLayer(daemon.idMapping)) diff --git a/daemon/delete.go b/daemon/delete.go index 96e37b6dc7850..54f4c07a023a6 100644 --- a/daemon/delete.go +++ b/daemon/delete.go @@ -8,6 +8,7 @@ import ( "strings" "time" + "github.com/containerd/containerd/leases" "github.com/docker/docker/api/types" containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/container" @@ -136,6 +137,17 @@ func (daemon *Daemon) cleanupContainer(container *container.Container, config ty return err } container.RWLayer = nil + } else { + if daemon.UsesSnapshotter() { + ls := daemon.containerdCli.LeasesService() + lease := leases.Lease{ + ID: container.ID, + } + if err := ls.Delete(context.Background(), lease, leases.SynchronousDelete); err != nil { + container.SetRemovalError(err) + 
return err + } + } } if err := containerfs.EnsureRemoveAll(container.Root); err != nil { From eb228ea1ad615b24d4ca245c1cb83e6129b69641 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Wed, 17 Aug 2022 11:29:18 +0200 Subject: [PATCH 46/90] Use getImage() to get the image when deleting This makes it possible to delete an image by name or id Signed-off-by: Djordje Lukic --- daemon/containerd/image_delete.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/daemon/containerd/image_delete.go b/daemon/containerd/image_delete.go index a88669920bbb9..7e572f44d89e7 100644 --- a/daemon/containerd/image_delete.go +++ b/daemon/containerd/image_delete.go @@ -4,7 +4,6 @@ import ( "context" "github.com/containerd/containerd/images" - "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" ) @@ -47,16 +46,15 @@ import ( // TODO(thaJeztah): add support for image delete using image (short)ID; see https://github.com/moby/moby/issues/43854 // TODO(thaJeztah): mage delete should send image "untag" events and prometheus counters; see https://github.com/moby/moby/issues/43855 func (i *ImageService) ImageDelete(ctx context.Context, imageRef string, force, prune bool) ([]types.ImageDeleteResponseItem, error) { - parsedRef, err := reference.ParseNormalizedNamed(imageRef) + c8dImg, _, err := i.getImage(ctx, imageRef) if err != nil { return nil, err } - ref := reference.TagNameOnly(parsedRef) - err = i.client.ImageService().Delete(ctx, ref.String(), images.SynchronousDelete()) + err = i.client.ImageService().Delete(ctx, c8dImg.Name(), images.SynchronousDelete()) if err != nil { return nil, err } - return []types.ImageDeleteResponseItem{{Untagged: reference.FamiliarString(parsedRef)}}, nil + return []types.ImageDeleteResponseItem{{Untagged: c8dImg.Name()}}, nil } From e3d77a12ac308bf3fc4204fe65dbbad5e41a630c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Wed, 17 Aug 2022 15:39:37 +0200 Subject: [PATCH 47/90] 
c8d/exporter: Use configured snapshotter instead of default MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/image_exporter.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/containerd/image_exporter.go b/daemon/containerd/image_exporter.go index 658f12aed17bd..8de68efe5ca61 100644 --- a/daemon/containerd/image_exporter.go +++ b/daemon/containerd/image_exporter.go @@ -20,7 +20,7 @@ import ( ) func (i *ImageService) PerformWithBaseFS(ctx context.Context, c *container.Container, fn func(containerfs.ContainerFS) error) error { - snapshotter := i.client.SnapshotService(containerd.DefaultSnapshotter) + snapshotter := i.client.SnapshotService(i.snapshotter) mounts, err := snapshotter.Mounts(ctx, c.ID) if err != nil { return err From dacb5075dffddd89aedddac268c8e5189aaab207 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Wed, 17 Aug 2022 16:50:13 +0200 Subject: [PATCH 48/90] c8d/pull: Don't unpack manually MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We pass WithPullUnpack anyway Signed-off-by: Paweł Gronowski --- daemon/containerd/image_pull.go | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/daemon/containerd/image_pull.go b/daemon/containerd/image_pull.go index dcb9861383832..f78da440c8c3b 100644 --- a/daemon/containerd/image_pull.go +++ b/daemon/containerd/image_pull.go @@ -61,21 +61,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, finishProgress := showProgress(ctx, jobs, outStream, pullProgress(i.client.ContentStore())) defer finishProgress() - img, err := i.client.Pull(ctx, ref.String(), opts...) 
- if err != nil { - return err - } - - unpacked, err := img.IsUnpacked(ctx, i.snapshotter) - if err != nil { - return err - } - - if !unpacked { - if err := img.Unpack(ctx, i.snapshotter); err != nil { - return err - } - } + _, err = i.client.Pull(ctx, ref.String(), opts...) return err } From 71c723b8d3b6d63ba2e8a43d30af57619ab6ed0d Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Thu, 18 Aug 2022 10:08:41 +0200 Subject: [PATCH 49/90] daemon/config: remove unused DefaultContainerdSnapshotter This was added as part of rumpl/moby#40, but wasn't used. Signed-off-by: Sebastiaan van Stijn --- daemon/config/config.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/daemon/config/config.go b/daemon/config/config.go index 25fc5091aba1c..645beb8c032f6 100644 --- a/daemon/config/config.go +++ b/daemon/config/config.go @@ -10,7 +10,6 @@ import ( "strings" "sync" - "github.com/containerd/containerd" "github.com/docker/docker/opts" "github.com/docker/docker/pkg/authorization" "github.com/docker/docker/registry" @@ -52,9 +51,6 @@ const ( // DefaultPluginNamespace is the name of the default containerd namespace used for plugins. 
DefaultPluginNamespace = "plugins.moby" - // DefaultContainerdSnapshotter is the name of the default containerd snapshotter used for creating container root fs - DefaultContainerdSnapshotter = containerd.DefaultSnapshotter - // LinuxV2RuntimeName is the runtime used to specify the containerd v2 runc shim LinuxV2RuntimeName = "io.containerd.runc.v2" From ac7ea9eefc86c538af54cc0becc931045046c793 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Thu, 18 Aug 2022 17:48:06 +0200 Subject: [PATCH 50/90] Add ExposedPorts and Volumes to the image returned Signed-off-by: Djordje Lukic --- daemon/containerd/image.go | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index 8832a5dbebefb..e821f9a5f7c30 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -16,6 +16,7 @@ import ( "github.com/docker/docker/errdefs" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/docker/go-connections/nat" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" @@ -43,6 +44,9 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima } tagged, err := i.client.ImageService().List(ctx, fmt.Sprintf("target.digest==%s", ii.Target().Digest.String())) + if err != nil { + return nil, err + } tags := make([]reference.Named, 0, len(tagged)) for _, i := range tagged { name, err := reference.ParseNamed(i.Name) @@ -98,17 +102,23 @@ func (i *ImageService) getImage(ctx context.Context, refOrID string) (containerd for _, id := range fs { rootfs.Append(layer.DiffID(id)) } + exposedPorts := make(nat.PortSet, len(ociimage.Config.ExposedPorts)) + for k, v := range ociimage.Config.ExposedPorts { + exposedPorts[nat.Port(k)] = v + } return ii, &image.Image{ V1Image: image.V1Image{ ID: string(desc.Digest), OS: ociimage.OS, Architecture: ociimage.Architecture, Config: 
&containertypes.Config{ - Entrypoint: ociimage.Config.Entrypoint, - Env: ociimage.Config.Env, - Cmd: ociimage.Config.Cmd, - User: ociimage.Config.User, - WorkingDir: ociimage.Config.WorkingDir, + Entrypoint: ociimage.Config.Entrypoint, + Env: ociimage.Config.Env, + Cmd: ociimage.Config.Cmd, + User: ociimage.Config.User, + WorkingDir: ociimage.Config.WorkingDir, + ExposedPorts: exposedPorts, + Volumes: ociimage.Config.Volumes, }, }, RootFS: rootfs, From 156ab63a091676c06cdf59d94ec97a6535e8c0e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Wed, 17 Aug 2022 16:18:57 +0200 Subject: [PATCH 51/90] c8d/daemon: Mount root and fill BaseFS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This fixes things that were broken due to nil BaseFS like `docker cp` and running a container with workdir override. This is more of a temporary hack than a real solution. The correct fix would be to refactor the code to make BaseFS and LayerRW an implementation detail of the old image store implementation and use the temporary mounts for the c8d implementation instead. That requires more work though. 
Signed-off-by: Paweł Gronowski --- daemon/containerd/mount.go | 52 ++++++++++++++++++++++++++++++++++++++ daemon/daemon.go | 39 ++-------------------------- daemon/daemon_unix.go | 10 ++++++-- daemon/image_service.go | 2 ++ daemon/images/mount.go | 51 +++++++++++++++++++++++++++++++++++++ 5 files changed, 115 insertions(+), 39 deletions(-) create mode 100644 daemon/containerd/mount.go create mode 100644 daemon/images/mount.go diff --git a/daemon/containerd/mount.go b/daemon/containerd/mount.go new file mode 100644 index 0000000000000..b0d4af19db291 --- /dev/null +++ b/daemon/containerd/mount.go @@ -0,0 +1,52 @@ +package containerd + +import ( + "context" + "fmt" + "os" + + "github.com/containerd/containerd/mount" + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/containerfs" + "github.com/sirupsen/logrus" +) + +// Mount mounts and sets container base filesystem +func (i *ImageService) Mount(ctx context.Context, container *container.Container) error { + snapshotter := i.client.SnapshotService(i.snapshotter) + mounts, err := snapshotter.Mounts(ctx, container.ID) + if err != nil { + return err + } + + tempMountLocation := os.TempDir() + + root, err := os.MkdirTemp(tempMountLocation, "rootfs-mount") + if err != nil { + return fmt.Errorf("failed to create temp dir: %w", err) + } + + if err := mount.All(mounts, root); err != nil { + return fmt.Errorf("failed to mount %s: %w", root, err) + } + + container.BaseFS = containerfs.NewLocalContainerFS(root) + return nil +} + +// Unmount unmounts the container base filesystem +func (i *ImageService) Unmount(ctx context.Context, container *container.Container) error { + root := container.BaseFS.Path() + + if err := mount.UnmountAll(root, 0); err != nil { + return fmt.Errorf("failed to unmount %s: %w", root, err) + } + + if err := os.Remove(root); err != nil { + logrus.WithError(err).WithField("dir", root).Error("failed to remove mount temp dir") + } + + container.BaseFS = nil + + return nil +} diff --git 
a/daemon/daemon.go b/daemon/daemon.go index ec6857a6835d0..e2aee0db879ce 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -1278,48 +1278,13 @@ func (daemon *Daemon) Shutdown(ctx context.Context) error { } // Mount sets container.BaseFS -// (is it not set coming in? why is it unset?) func (daemon *Daemon) Mount(container *container.Container) error { - if daemon.UsesSnapshotter() { - return nil - } - if container.RWLayer == nil { - return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil") - } - dir, err := container.RWLayer.Mount(container.GetMountLabel()) - if err != nil { - return err - } - logrus.WithField("container", container.ID).Debugf("container mounted via layerStore: %v", dir) - - if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() { - // The mount path reported by the graph driver should always be trusted on Windows, since the - // volume path for a given mounted layer may change over time. This should only be an error - // on non-Windows operating systems. 
- if runtime.GOOS != "windows" { - daemon.Unmount(container) - return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", - daemon.imageService.GraphDriverName(), container.ID, container.BaseFS, dir) - } - } - container.BaseFS = dir // TODO: combine these fields - return nil + return daemon.imageService.Mount(context.Background(), container) } // Unmount unsets the container base filesystem func (daemon *Daemon) Unmount(container *container.Container) error { - if daemon.UsesSnapshotter() { - return nil - } - if container.RWLayer == nil { - return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil") - } - if err := container.RWLayer.Unmount(); err != nil { - logrus.WithField("container", container.ID).WithError(err).Error("error unmounting container") - return err - } - - return nil + return daemon.imageService.Unmount(context.Background(), container) } // Subnets return the IPv4 and IPv6 subnets of networks that are manager by Docker. diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go index b3c126353d667..b246257fb9632 100644 --- a/daemon/daemon_unix.go +++ b/daemon/daemon_unix.go @@ -1362,13 +1362,19 @@ func (daemon *Daemon) registerLinks(container *container.Container, hostConfig * // conditionalMountOnStart is a platform specific helper function during the // container start to call mount. func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { - return daemon.Mount(container) + if !daemon.UsesSnapshotter() { + return daemon.Mount(container) + } + return nil } // conditionalUnmountOnCleanup is a platform specific helper function called // during the cleanup of a container to unmount. 
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { - return daemon.Unmount(container) + if !daemon.UsesSnapshotter() { + return daemon.Unmount(container) + } + return nil } func copyBlkioEntry(entries []*statsV1.BlkIOEntry) []types.BlkioStatEntry { diff --git a/daemon/image_service.go b/daemon/image_service.go index 0522d320266cd..92413121020cd 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -54,6 +54,8 @@ type ImageService interface { ReleaseLayer(rwlayer layer.RWLayer) error LayerDiskUsage(ctx context.Context) (int64, error) GetContainerLayerSize(ctx context.Context, containerID string) (int64, int64, error) + Mount(ctx context.Context, container *container.Container) error + Unmount(ctx context.Context, container *container.Container) error // Windows specific diff --git a/daemon/images/mount.go b/daemon/images/mount.go new file mode 100644 index 0000000000000..383c18e71351f --- /dev/null +++ b/daemon/images/mount.go @@ -0,0 +1,51 @@ +package images + +import ( + "context" + "fmt" + "runtime" + + "github.com/docker/docker/container" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// Mount sets container.BaseFS +// (is it not set coming in? why is it unset?) +func (i *ImageService) Mount(ctx context.Context, container *container.Container) error { + if container.RWLayer == nil { + return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil") + } + dir, err := container.RWLayer.Mount(container.GetMountLabel()) + if err != nil { + return err + } + logrus.WithField("container", container.ID).Debugf("container mounted via layerStore: %v", dir) + + if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() { + // The mount path reported by the graph driver should always be trusted on Windows, since the + // volume path for a given mounted layer may change over time. This should only be an error + // on non-Windows operating systems. 
+ if runtime.GOOS != "windows" { + i.Unmount(ctx, container) + return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", + i.GraphDriverName(), container.ID, container.BaseFS, dir) + } + } + container.BaseFS = dir // TODO: combine these fields + return nil +} + +// Unmount unsets the container base filesystem +func (i *ImageService) Unmount(ctx context.Context, container *container.Container) error { + if container.RWLayer == nil { + return errors.New("RWLayer of container " + container.ID + " is unexpectedly nil") + } + if err := container.RWLayer.Unmount(); err != nil { + logrus.WithField("container", container.ID).WithError(err).Error("error unmounting container") + return err + } + container.BaseFS = nil + + return nil +} From 63aaf26535eb44c664790597d4004734880fea12 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Fri, 19 Aug 2022 12:22:14 +0200 Subject: [PATCH 52/90] Implement image history Signed-off-by: Djordje Lukic --- daemon/containerd/image_history.go | 72 +++++++++++++++++++++++++++++- 1 file changed, 70 insertions(+), 2 deletions(-) diff --git a/daemon/containerd/image_history.go b/daemon/containerd/image_history.go index d79333a75b929..d3849f47de3fb 100644 --- a/daemon/containerd/image_history.go +++ b/daemon/containerd/image_history.go @@ -2,13 +2,81 @@ package containerd import ( "context" - "errors" + "encoding/json" + "github.com/containerd/containerd/content" imagetype "github.com/docker/docker/api/types/image" + "github.com/opencontainers/image-spec/identity" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" ) // ImageHistory returns a slice of ImageHistory structures for the specified // image name by walking the image lineage. 
func (i *ImageService) ImageHistory(ctx context.Context, name string) ([]*imagetype.HistoryResponseItem, error) { - return nil, errors.New("not implemented") + c8dimg, _, err := i.getImage(ctx, name) + if err != nil { + return nil, err + } + + config, err := c8dimg.Config(ctx) + if err != nil { + return nil, err + } + + blob, err := content.ReadBlob(ctx, c8dimg.ContentStore(), config) + if err != nil { + return nil, err + } + + var image ocispec.Image + if err := json.Unmarshal(blob, &image); err != nil { + return nil, err + } + + history := []*imagetype.HistoryResponseItem{} + + diffIDs, err := c8dimg.RootFS(ctx) + if err != nil { + return nil, err + } + + sizes := []int64{} + s := i.client.SnapshotService(i.snapshotter) + for i := range diffIDs { + diffIDs := diffIDs[0 : i+1] + chainID := identity.ChainID(diffIDs).String() + + use, err := s.Usage(ctx, chainID) + if err != nil { + return nil, err + } + + sizes = append(sizes, use.Size) + } + + for _, h := range image.History { + size := int64(0) + if !h.EmptyLayer { + if len(sizes) == 0 { + return nil, errors.New("unable to find the size of the layer") + } + size = sizes[0] + sizes = sizes[1:] + } + + history = append([]*imagetype.HistoryResponseItem{{ + ID: "", + Comment: h.Comment, + CreatedBy: h.CreatedBy, + Created: h.Created.Unix(), + Size: size, + }}, history...) + } + + if len(history) != 0 { + history[0].ID = c8dimg.Target().Digest.String() + } + + return history, nil } From 7a91b741af2d01c168a2da5b17aba763aa608f28 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Fri, 19 Aug 2022 13:07:06 +0200 Subject: [PATCH 53/90] Pass the containerd client when Applying oci options Some of the options need the client, WithUser for example. 
Signed-off-by: Djordje Lukic --- daemon/oci_linux.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go index 5bdd8dccfcd10..6437fa6ab7e31 100644 --- a/daemon/oci_linux.go +++ b/daemon/oci_linux.go @@ -1069,7 +1069,7 @@ func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (r } - return &s, coci.ApplyOpts(context.Background(), nil, &containers.Container{ + return &s, coci.ApplyOpts(context.Background(), daemon.containerdCli, &containers.Container{ ID: c.ID, Snapshotter: snapshotter, SnapshotKey: snapshotKey, From 6d660b1ed8dc707ab59656a79d7e154ba13fbb0a Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Tue, 9 Aug 2022 17:03:50 +0200 Subject: [PATCH 54/90] ImageService: rename GraphDriverName to StorageDriver Make the function name more generic, as it's no longer used only for graphdrivers but also for snapshotters. Signed-off-by: Sebastiaan van Stijn --- cmd/dockerd/daemon.go | 2 +- daemon/container.go | 2 +- daemon/containerd/image.go | 2 +- daemon/containerd/service.go | 8 +++----- daemon/image_service.go | 2 +- daemon/images/mount.go | 2 +- daemon/images/service.go | 7 ++----- 7 files changed, 10 insertions(+), 15 deletions(-) diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index fff71847271d7..28de28af43144 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -307,7 +307,7 @@ func newRouterOptions(config *config.Config, d *daemon.Daemon) (routerOptions, e DNSConfig: config.DNSConfig, ApparmorProfile: daemon.DefaultApparmorProfile(), UseSnapshotter: d.UsesSnapshotter(), - Snapshotter: d.ImageService().GraphDriverName(), + Snapshotter: d.ImageService().StorageDriver(), ContainerdAddress: config.ContainerdAddr, ContainerdNamespace: config.ContainerdNamespace, }) diff --git a/daemon/container.go b/daemon/container.go index dd752f2b44ff3..d21b3386e13b2 100644 --- a/daemon/container.go +++ b/daemon/container.go @@ -154,7 +154,7 @@ func (daemon *Daemon) 
newContainer(name string, operatingSystem string, config * base.ImageID = imgID base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName} base.Name = name - base.Driver = daemon.imageService.GraphDriverName() + base.Driver = daemon.imageService.StorageDriver() base.OS = operatingSystem return base, err } diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index e821f9a5f7c30..170bbb01bbd55 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -60,7 +60,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima References: tags, Size: size, Metadata: nil, - Driver: i.GraphDriverName(), + Driver: i.StorageDriver(), LastUpdated: ii.Metadata().UpdatedAt, } } diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index 952770f2e982e..953ff1ee963b7 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -86,11 +86,9 @@ func (i *ImageService) Cleanup() error { return nil } -// GraphDriverName returns the name of the graph drvier -// moved from Daemon.GraphDriverName, used by: -// - newContainer -// - to report an error in Daemon.Mount(container) -func (i *ImageService) GraphDriverName() string { +// StorageDriver returns the name of the default storage-driver (snapshotter) +// used by the ImageService. 
+func (i *ImageService) StorageDriver() string { return i.snapshotter } diff --git a/daemon/image_service.go b/daemon/image_service.go index 92413121020cd..01683d33123b6 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -73,6 +73,6 @@ type ImageService interface { DistributionServices() images.DistributionServices Children(id image.ID) []image.ID Cleanup() error - GraphDriverName() string + StorageDriver() string UpdateConfig(maxDownloads, maxUploads int) } diff --git a/daemon/images/mount.go b/daemon/images/mount.go index 383c18e71351f..d303f2d982dec 100644 --- a/daemon/images/mount.go +++ b/daemon/images/mount.go @@ -29,7 +29,7 @@ func (i *ImageService) Mount(ctx context.Context, container *container.Container if runtime.GOOS != "windows" { i.Unmount(ctx, container) return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", - i.GraphDriverName(), container.ID, container.BaseFS, dir) + i.StorageDriver(), container.ID, container.BaseFS, dir) } } container.BaseFS = dir // TODO: combine these fields diff --git a/daemon/images/service.go b/daemon/images/service.go index 92965170e8077..09d5a3644dfc4 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -178,11 +178,8 @@ func (i *ImageService) Cleanup() error { return nil } -// GraphDriverName returns the name of the graph drvier -// moved from Daemon.GraphDriverName, used by: -// - newContainer -// - to report an error in Daemon.Mount(container) -func (i *ImageService) GraphDriverName() string { +// StorageDriver returns the name of the storage driver used by the ImageService. 
+func (i *ImageService) StorageDriver() string { return i.layerStore.DriverName() } From e6d263328179855bb93d235e769753599980b407 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Tue, 9 Aug 2022 11:04:47 +0200 Subject: [PATCH 55/90] daemon: info: fillDriverInfo() get driver-name from ImageService Make the ImageService the source of truth for the storage-driver that's used. Signed-off-by: Sebastiaan van Stijn --- daemon/info.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/daemon/info.go b/daemon/info.go index 83e59de55fa4b..416287004280d 100644 --- a/daemon/info.go +++ b/daemon/info.go @@ -120,18 +120,18 @@ func (daemon *Daemon) SystemVersion() types.Version { } func (daemon *Daemon) fillDriverInfo(v *types.Info) { + v.Driver = daemon.imageService.StorageDriver() + v.DriverStatus = daemon.imageService.LayerStoreStatus() + const warnMsg = ` WARNING: The %s storage-driver is deprecated, and will be removed in a future release. Refer to the documentation for more information: https://docs.docker.com/go/storage-driver/` - switch daemon.graphDriver { + switch v.Driver { case "aufs", "devicemapper", "overlay": - v.Warnings = append(v.Warnings, fmt.Sprintf(warnMsg, daemon.graphDriver)) + v.Warnings = append(v.Warnings, fmt.Sprintf(warnMsg, v.Driver)) } - v.Driver = daemon.graphDriver - v.DriverStatus = daemon.imageService.LayerStoreStatus() - fillDriverWarnings(v) } From b7994b593c72d1c8fe72eaaee8c0e1e8fac85eb4 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Tue, 9 Aug 2022 11:05:59 +0200 Subject: [PATCH 56/90] daemon: containerStart(): get driver-name from ImageService Make the ImageService the source of truth for the storage-driver that's used. 
Signed-off-by: Sebastiaan van Stijn --- daemon/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/start.go b/daemon/start.go index da3c9b84c7f2e..d92f583fd1093 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -180,7 +180,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C newContainerOpts := []containerd.NewContainerOpts{} if daemon.UsesSnapshotter() { - newContainerOpts = append(newContainerOpts, containerd.WithSnapshotter(daemon.graphDriver)) + newContainerOpts = append(newContainerOpts, containerd.WithSnapshotter(daemon.ImageService().StorageDriver())) newContainerOpts = append(newContainerOpts, containerd.WithSnapshot(container.ID)) c8dImge, err := daemon.imageService.(containerdImage).GetContainerdImage(ctx, container.Config.Image, &v1.Platform{}) if err != nil { From d3ecc3b2b1376251dcbbe01d2233372d53caf536 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Tue, 9 Aug 2022 11:06:49 +0200 Subject: [PATCH 57/90] daemon: createSpec: get driver-name from ImageService Make the ImageService the source of truth for the storage-driver that's used. 
Signed-off-by: Sebastiaan van Stijn --- daemon/oci_linux.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go index 6437fa6ab7e31..71c3de05ada06 100644 --- a/daemon/oci_linux.go +++ b/daemon/oci_linux.go @@ -1064,9 +1064,8 @@ func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (r snapshotter := "" snapshotKey := "" if daemon.UsesSnapshotter() { - snapshotter = daemon.graphDriver + snapshotter = daemon.ImageService().StorageDriver() snapshotKey = c.ID - } return &s, coci.ApplyOpts(context.Background(), daemon.containerdCli, &containers.Container{ From 387662a1a5dc320bb6c7f560ca88ecae0ef51dfc Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Tue, 9 Aug 2022 11:07:27 +0200 Subject: [PATCH 58/90] daemon: create(): get driver-name from ImageService Make the ImageService the source of truth for the storage-driver that's used. Signed-off-by: Sebastiaan van Stijn --- daemon/create.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/daemon/create.go b/daemon/create.go index 715f04dca1ec9..98eaf2e8004a6 100644 --- a/daemon/create.go +++ b/daemon/create.go @@ -190,7 +190,7 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai return nil, err } parent := identity.ChainID(diffIDs).String() - s := daemon.containerdCli.SnapshotService(daemon.graphDriver) + s := daemon.containerdCli.SnapshotService(daemon.ImageService().StorageDriver()) if _, err := s.Prepare(ctx, ctr.ID, parent); err != nil { return nil, err } @@ -202,7 +202,7 @@ func (daemon *Daemon) create(ctx context.Context, opts createOpts) (retC *contai } ls.AddResource(ctx, lease, leases.Resource{ ID: ctr.ID, - Type: "snapshots/" + daemon.graphDriver, + Type: "snapshots/" + daemon.imageService.StorageDriver(), }) } else { // Set RWLayer for container after mount labels have been set From 9abf94458e185420c30fb98da783b85a7d0dc4c0 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn 
Date: Tue, 9 Aug 2022 11:11:52 +0200 Subject: [PATCH 59/90] daemon: remove daemon.graphdriver It was only used as an intermediate variable to store what's returned by layerstore.DriverName() / ImageService.GraphDriverName() Signed-off-by: Sebastiaan van Stijn --- daemon/daemon.go | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index e2aee0db879ce..d6ac4cc713105 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -90,7 +90,6 @@ type Daemon struct { sysInfo *sysinfo.SysInfo shutdown bool idMapping idtools.IdentityMapping - graphDriver string // TODO: move graphDriver field to an InfoService PluginStore *plugin.Store // TODO: remove pluginManager *plugin.Manager linkIndex *linkIndex @@ -256,7 +255,7 @@ func (daemon *Daemon) restore(ctx context.Context) error { return } // Ignore the container if it does not support the current driver being used by the graph - if (c.Driver == "" && daemon.graphDriver == "aufs") || c.Driver == daemon.graphDriver { + if driver := daemon.imageService.StorageDriver(); (c.Driver == "" && driver == "aufs") || c.Driver == driver { if accessor, ok := daemon.imageService.(layerAccessor); ok { rwlayer, err := accessor.GetLayerByID(c.ID) if err != nil { @@ -1005,7 +1004,6 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, err } d.imageService = ctrd.NewService(d.containerdCli, d.containers, snapshotter) - d.graphDriver = snapshotter } else { layerStore, err := layer.NewStoreFromOptions(layer.StoreOptions{ Root: config.Root, @@ -1020,16 +1018,13 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S return nil, err } - // As layerstore initialization may set the driver - d.graphDriver = layerStore.DriverName() - // Configure and validate the kernels security support. Note this is a Linux/FreeBSD // operation only, so it is safe to pass *just* the runtime OS graphdriver. 
- if err := configureKernelSecuritySupport(config, d.graphDriver); err != nil { + if err := configureKernelSecuritySupport(config, layerStore.DriverName()); err != nil { return nil, err } - imageRoot := filepath.Join(config.Root, "image", d.graphDriver) + imageRoot := filepath.Join(config.Root, "image", layerStore.DriverName()) ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb")) if err != nil { return nil, err @@ -1147,7 +1142,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S logrus.WithFields(logrus.Fields{ "version": dockerversion.Version, "commit": dockerversion.GitCommit, - "graphdriver": d.graphDriver, + "graphdriver": d.ImageService().StorageDriver(), }).Info("Docker daemon") return d, nil From fd8220267185b75f59690fdfd20f82fc3ac10e5d Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sat, 20 Aug 2022 00:29:54 +0200 Subject: [PATCH 60/90] daemon: containerStart() keep container's snapshotter Make sure we keep the snapshotter that was set for the container when starting it, instead of resetting it to the current daemon's default. 
Signed-off-by: Sebastiaan van Stijn --- daemon/start.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/start.go b/daemon/start.go index d92f583fd1093..a9754a0563551 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -180,7 +180,7 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C newContainerOpts := []containerd.NewContainerOpts{} if daemon.UsesSnapshotter() { - newContainerOpts = append(newContainerOpts, containerd.WithSnapshotter(daemon.ImageService().StorageDriver())) + newContainerOpts = append(newContainerOpts, containerd.WithSnapshotter(container.Driver)) newContainerOpts = append(newContainerOpts, containerd.WithSnapshot(container.ID)) c8dImge, err := daemon.imageService.(containerdImage).GetContainerdImage(ctx, container.Config.Image, &v1.Platform{}) if err != nil { From ce5b73f59c7519ebeb51ea545e87caae9ad13897 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Sat, 20 Aug 2022 01:18:43 +0200 Subject: [PATCH 61/90] daemon: containerStart(): optimize logic - combine the two "if daemon.UsesSnapshotter()" branches - newContainerOpts was only used if a container had to be created, so skip creating options (and look up the image) if the container was found. 
Signed-off-by: Sebastiaan van Stijn --- daemon/start.go | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/daemon/start.go b/daemon/start.go index a9754a0563551..4cf4aa64ed80d 100644 --- a/daemon/start.go +++ b/daemon/start.go @@ -178,24 +178,25 @@ func (daemon *Daemon) containerStart(ctx context.Context, container *container.C return err } - newContainerOpts := []containerd.NewContainerOpts{} - if daemon.UsesSnapshotter() { - newContainerOpts = append(newContainerOpts, containerd.WithSnapshotter(container.Driver)) - newContainerOpts = append(newContainerOpts, containerd.WithSnapshot(container.ID)) - c8dImge, err := daemon.imageService.(containerdImage).GetContainerdImage(ctx, container.Config.Image, &v1.Platform{}) - if err != nil { - return err - } - ctrdimg := containerd.NewImage(daemon.containerdCli, c8dImge) - newContainerOpts = append(newContainerOpts, containerd.WithImage(ctrdimg)) - } - createContainer := true + + var newContainerOpts []containerd.NewContainerOpts if daemon.UsesSnapshotter() { // When using the containerd snapshotters we want to reuse the existing containerd container - _, err := daemon.containerdCli.LoadContainer(ctx, container.ID) + _, err = daemon.containerdCli.LoadContainer(ctx, container.ID) if err == nil { createContainer = false + } else { + c8dImge, err := daemon.imageService.(containerdImage).GetContainerdImage(ctx, container.Config.Image, &v1.Platform{}) + if err != nil { + return err + } + + newContainerOpts = append(newContainerOpts, + containerd.WithSnapshotter(container.Driver), + containerd.WithSnapshot(container.ID), + containerd.WithImage(containerd.NewImage(daemon.containerdCli, c8dImge)), + ) } } From 54ce8ed5874ea7d0ad882470aa1f9b39a681cd2f Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Mon, 22 Aug 2022 18:36:03 +0200 Subject: [PATCH 62/90] info: add driver-type With this patch: mkdir -p /etc/docker/ echo '{"features":{"containerd-snapshotter":true}}' > 
/etc/docker/daemon.json dockerd docker info ... Storage Driver: overlayfs driver-type: io.containerd.snapshotter.v1 Logging Driver: json-file Signed-off-by: Sebastiaan van Stijn --- daemon/containerd/service.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index 953ff1ee963b7..84bf86866d4f3 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -7,6 +7,7 @@ import ( "github.com/containerd/containerd" cerrdefs "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/plugin" "github.com/containerd/containerd/snapshots" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" @@ -70,7 +71,10 @@ func (i *ImageService) CreateLayer(container *container.Container, initFunc laye // LayerStoreStatus returns the status for each layer store // called from info.go func (i *ImageService) LayerStoreStatus() [][2]string { - return [][2]string{} + // TODO(thaJeztah) do we want to add more details about the driver here? 
+ return [][2]string{ + {"driver-type", string(plugin.SnapshotPlugin)}, + } } // GetLayerMountID returns the mount ID for a layer From 65789b75a2f4e6ee91630bb41c73529a37dff117 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Mon, 22 Aug 2022 19:53:07 +0200 Subject: [PATCH 63/90] integration-cli: add utility to check if snapshotters are enabled Signed-off-by: Sebastiaan van Stijn --- integration-cli/docker_cli_inspect_test.go | 3 +-- integration-cli/requirements_test.go | 12 ++++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go index 0fe9106585186..1daabeac5b683 100644 --- a/integration-cli/docker_cli_inspect_test.go +++ b/integration-cli/docker_cli_inspect_test.go @@ -42,8 +42,7 @@ func (s *DockerCLIInspectSuite) TestInspectImage(c *testing.T) { // fails, fix the difference in the image serialization instead of // updating this hash. imageTestID := "sha256:11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d" - usesContainerdSnapshotter := false // TODO(vvoland): Check for feature flag - if usesContainerdSnapshotter { + if containerdSnapshotterEnabled() { // Under containerd ID of the image is the digest of the manifest list. 
imageTestID = "sha256:e43ca824363c5c56016f6ede3a9035afe0e9bd43333215e0b0bde6193969725d" } diff --git a/integration-cli/requirements_test.go b/integration-cli/requirements_test.go index 3c4e19478152c..c2b16fc2f836d 100644 --- a/integration-cli/requirements_test.go +++ b/integration-cli/requirements_test.go @@ -10,6 +10,7 @@ import ( "testing" "time" + "github.com/containerd/containerd/plugin" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/swarm" "github.com/docker/docker/api/types/versions" @@ -99,6 +100,17 @@ func Devicemapper() bool { return strings.HasPrefix(testEnv.DaemonInfo.Driver, "devicemapper") } +// containerdSnapshotterEnabled checks if the daemon in the test-environment is +// configured with containerd-snapshotters enabled. +func containerdSnapshotterEnabled() bool { + for _, v := range testEnv.DaemonInfo.DriverStatus { + if v[0] == "driver-type" { + return v[1] == string(plugin.SnapshotPlugin) + } + } + return false +} + func IPv6() bool { cmd := exec.Command("test", "-f", "/proc/net/if_inet6") return cmd.Run() != nil From 0a508686e8be494bde487d50ac1cb05ba0b3f409 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Tue, 23 Aug 2022 17:18:17 +0200 Subject: [PATCH 64/90] c8d/progress: Fix progress not ending MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix a bug which makes the progress handler never ending if an error happens before any ongoing is added. 
Signed-off-by: Paweł Gronowski --- daemon/containerd/progress.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/daemon/containerd/progress.go b/daemon/containerd/progress.go index d159a76d1d5a8..81e039b6fe7a6 100644 --- a/daemon/containerd/progress.go +++ b/daemon/containerd/progress.go @@ -29,7 +29,6 @@ func showProgress(ctx context.Context, ongoing *jobs, w io.Writer, updateFunc up out = streamformatter.NewJSONProgressOutput(w, false) ticker = time.NewTicker(100 * time.Millisecond) start = time.Now() - done bool ) for _, j := range ongoing.Jobs() { @@ -54,12 +53,9 @@ func showProgress(ctx context.Context, ongoing *jobs, w io.Writer, updateFunc up logrus.WithError(err).Error("Updating progress failed") return } - - if done { - return - } case <-ctx.Done(): - done = true + updateFunc(ctx, ongoing, out, start) + return } } }() From c7f29d811d81c28dc1d9fe7c3da7b112b0cf3f97 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 24 Aug 2022 15:27:38 +0200 Subject: [PATCH 65/90] image commit: use configured snapshotter instead of default Signed-off-by: Sebastiaan van Stijn --- daemon/containerd/image_commit.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/daemon/containerd/image_commit.go b/daemon/containerd/image_commit.go index d3d479fbed2cb..e31ec7d8bffae 100644 --- a/daemon/containerd/image_commit.go +++ b/daemon/containerd/image_commit.go @@ -74,7 +74,7 @@ func (i *ImageService) CommitImage(ctx context.Context, cc backend.CommitConfig) var ( differ = i.client.DiffService() - sn = i.client.SnapshotService(containerd.DefaultSnapshotter) + sn = i.client.SnapshotService(i.snapshotter) ) // Don't gc me and clean the dirty data after 1 hour! 
@@ -100,7 +100,7 @@ func (i *ImageService) CommitImage(ctx context.Context, cc backend.CommitConfig) } layers := append(ocimanifest.Layers, diffLayerDesc) - commitManifestDesc, configDigest, err := writeContentsForImage(ctx, containerd.DefaultSnapshotter, baseImg, imageConfig, layers) + commitManifestDesc, configDigest, err := writeContentsForImage(ctx, i.snapshotter, baseImg, imageConfig, layers) if err != nil { return "", err } From c68323010ab5cf80beb7c825d4a2da11bbc541e2 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Wed, 24 Aug 2022 15:34:27 +0200 Subject: [PATCH 66/90] daemon: remove SnapshotterFromGraphDriver mapping It was decided in upstream to not do mapping of graphdriver-names to snapshotters, and instead require the actual name of the snapshotter to be used when configuring the daemon's storage driver. This patch removes the code that was used for mapping to make it closer to upstream. Signed-off-by: Sebastiaan van Stijn --- daemon/containerd/snapshotters.go | 19 --------- daemon/containerd/snapshotters_test.go | 58 -------------------------- daemon/daemon.go | 17 ++++---- 3 files changed, 8 insertions(+), 86 deletions(-) delete mode 100644 daemon/containerd/snapshotters.go delete mode 100644 daemon/containerd/snapshotters_test.go diff --git a/daemon/containerd/snapshotters.go b/daemon/containerd/snapshotters.go deleted file mode 100644 index bd9e4766827a7..0000000000000 --- a/daemon/containerd/snapshotters.go +++ /dev/null @@ -1,19 +0,0 @@ -package containerd - -import "github.com/containerd/containerd" - -// SnapshotterFromGraphDriver returns the containerd snapshotter name based on -// the supplied graphdriver name. It handles both legacy names and translates -// them into corresponding containerd snapshotter names. 
-func SnapshotterFromGraphDriver(graphDriver string) string { - switch graphDriver { - case "overlay", "overlay2": - return "overlayfs" - case "windowsfilter": - return "windows" - case "": - return containerd.DefaultSnapshotter - default: - return graphDriver - } -} diff --git a/daemon/containerd/snapshotters_test.go b/daemon/containerd/snapshotters_test.go deleted file mode 100644 index 9d05794bb5cfa..0000000000000 --- a/daemon/containerd/snapshotters_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package containerd - -import ( - "testing" - - "github.com/containerd/containerd" - "gotest.tools/v3/assert" -) - -func TestSnapshotterFromGraphDriver(t *testing.T) { - testCases := []struct { - desc string - input string - expected string - }{ - { - desc: "empty defaults to containerd default", - input: "", - expected: containerd.DefaultSnapshotter, - }, - { - desc: "overlay -> overlayfs", - input: "overlay", - expected: "overlayfs", - }, - { - desc: "overlay2 -> overlayfs", - input: "overlay2", - expected: "overlayfs", - }, - { - desc: "windowsfilter -> windows", - input: "windowsfilter", - expected: "windows", - }, - { - desc: "containerd overlayfs", - input: "overlayfs", - expected: "overlayfs", - }, - { - desc: "containerd zfs", - input: "zfs", - expected: "zfs", - }, - { - desc: "unknown is unchanged", - input: "somefuturesnapshotter", - expected: "somefuturesnapshotter", - }, - } - for _, tc := range testCases { - tc := tc - t.Run(tc.desc, func(t *testing.T) { - assert.Equal(t, SnapshotterFromGraphDriver(tc.input), tc.expected) - }) - } -} diff --git a/daemon/daemon.go b/daemon/daemon.go index d6ac4cc713105..27169499ee716 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -987,28 +987,27 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S // Unix platforms however run a single graphdriver for all containers, and it can // be set through an environment variable, a daemon start parameter, or chosen through // initialization of the 
layerstore through driver priority order for example. - graphDriver := os.Getenv("DOCKER_DRIVER") + driverName := os.Getenv("DOCKER_DRIVER") if isWindows { - graphDriver = "windowsfilter" - } else if graphDriver != "" { - logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", graphDriver) + driverName = "windowsfilter" + } else if driverName != "" { + logrus.Infof("Setting the storage driver from the $DOCKER_DRIVER environment variable (%s)", driverName) } else { - graphDriver = config.GraphDriver + driverName = config.GraphDriver } if d.UsesSnapshotter() { - snapshotter := ctrd.SnapshotterFromGraphDriver(graphDriver) // Configure and validate the kernels security support. Note this is a Linux/FreeBSD // operation only, so it is safe to pass *just* the runtime OS graphdriver. - if err := configureKernelSecuritySupport(config, snapshotter); err != nil { + if err := configureKernelSecuritySupport(config, driverName); err != nil { return nil, err } - d.imageService = ctrd.NewService(d.containerdCli, d.containers, snapshotter) + d.imageService = ctrd.NewService(d.containerdCli, d.containers, driverName) } else { layerStore, err := layer.NewStoreFromOptions(layer.StoreOptions{ Root: config.Root, MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"), - GraphDriver: graphDriver, + GraphDriver: driverName, GraphDriverOptions: config.GraphOptions, IDMapping: idMapping, PluginGetter: d.PluginStore, From d6d56444acdad9f69ad681cfd7d8b91c0da61d05 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Thu, 25 Aug 2022 09:52:04 +0200 Subject: [PATCH 67/90] registry: move v1 endpoint tests to endpoint_test.go Moves the TestPingRegistryEndpoint and TestEndpoint tests. 
Signed-off-by: Sebastiaan van Stijn --- registry/endpoint_test.go | 98 ++++++++++++++++++++++++++++++ registry/registry_test.go | 124 -------------------------------------- 2 files changed, 98 insertions(+), 124 deletions(-) diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go index e36db56a3352a..53bdfc788fc9d 100644 --- a/registry/endpoint_test.go +++ b/registry/endpoint_test.go @@ -4,9 +4,107 @@ import ( "net/http" "net/http/httptest" "net/url" + "os" + "strings" "testing" + + "github.com/docker/docker/api/types/registry" + "gotest.tools/v3/assert" + "gotest.tools/v3/skip" ) +func TestPingRegistryEndpoint(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") + testPing := func(index *registry.IndexInfo, expectedStandalone bool, assertMessage string) { + ep, err := newV1Endpoint(index, "", nil) + if err != nil { + t.Fatal(err) + } + regInfo, err := ep.ping() + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, regInfo.Standalone, expectedStandalone, assertMessage) + } + + testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") + testPing(makePublicIndex(), false, "Expected standalone to be false for public index") +} + +func TestEndpoint(t *testing.T) { + skip.If(t, os.Getuid() != 0, "skipping test that requires root") + // Simple wrapper to fail test if err != nil + expandEndpoint := func(index *registry.IndexInfo) *v1Endpoint { + endpoint, err := newV1Endpoint(index, "", nil) + if err != nil { + t.Fatal(err) + } + return endpoint + } + + assertInsecureIndex := func(index *registry.IndexInfo) { + index.Secure = true + _, err := newV1Endpoint(index, "", nil) + assert.ErrorContains(t, err, "insecure-registry", index.Name+": Expected insecure-registry error for insecure index") + index.Secure = false + } + + assertSecureIndex := func(index *registry.IndexInfo) { + index.Secure = true + _, err := 
newV1Endpoint(index, "", nil) + assert.ErrorContains(t, err, "certificate signed by unknown authority", index.Name+": Expected cert error for secure index") + index.Secure = false + } + + index := ®istry.IndexInfo{} + index.Name = makeURL("/v1/") + endpoint := expandEndpoint(index) + assert.Equal(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + assertInsecureIndex(index) + + index.Name = makeURL("") + endpoint = expandEndpoint(index) + assert.Equal(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + assertInsecureIndex(index) + + httpURL := makeURL("") + index.Name = strings.SplitN(httpURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assert.Equal(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") + assertInsecureIndex(index) + + index.Name = makeHTTPSURL("/v1/") + endpoint = expandEndpoint(index) + assert.Equal(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) + assertSecureIndex(index) + + index.Name = makeHTTPSURL("") + endpoint = expandEndpoint(index) + assert.Equal(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") + assertSecureIndex(index) + + httpsURL := makeHTTPSURL("") + index.Name = strings.SplitN(httpsURL, "://", 2)[1] + endpoint = expandEndpoint(index) + assert.Equal(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") + assertSecureIndex(index) + + badEndpoints := []string{ + "http://127.0.0.1/v1/", + "https://127.0.0.1/v1/", + "http://127.0.0.1", + "https://127.0.0.1", + "127.0.0.1", + } + for _, address := range badEndpoints { + index.Name = address + _, err := newV1Endpoint(index, "", nil) + assert.Check(t, err != nil, "Expected error while expanding bad endpoint: %s", address) + } +} + func TestEndpointParse(t *testing.T) { testData := []struct { str string diff --git a/registry/registry_test.go 
b/registry/registry_test.go index 889064e0cc944..592dcf7a50f24 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -4,139 +4,15 @@ import ( "net/http" "net/http/httputil" "os" - "strings" "testing" "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client/transport" "github.com/docker/docker/api/types/registry" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" "gotest.tools/v3/skip" ) -func spawnTestRegistrySession(t *testing.T) *session { - authConfig := ®istry.AuthConfig{} - endpoint, err := newV1Endpoint(makeIndex("/v1/"), "", nil) - if err != nil { - t.Fatal(err) - } - userAgent := "docker test client" - var tr http.RoundTripper = debugTransport{newTransport(nil), t.Log} - tr = transport.NewTransport(newAuthTransport(tr, authConfig, false), Headers(userAgent, nil)...) - client := httpClient(tr) - - if err := authorizeClient(client, authConfig, endpoint); err != nil { - t.Fatal(err) - } - r := newSession(client, endpoint) - - // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` - // header while authenticating, in order to retrieve a token that can be later used to - // perform authenticated actions. - // - // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, - // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. - // - // Because we know that the client's transport is an `*authTransport` we simply cast it, - // in order to set the internal cached token to the fake token, and thus send that fake token - // upon every subsequent requests. 
- r.client.Transport.(*authTransport).token = []string{"fake-token"} - return r -} - -func TestPingRegistryEndpoint(t *testing.T) { - skip.If(t, os.Getuid() != 0, "skipping test that requires root") - testPing := func(index *registry.IndexInfo, expectedStandalone bool, assertMessage string) { - ep, err := newV1Endpoint(index, "", nil) - if err != nil { - t.Fatal(err) - } - regInfo, err := ep.ping() - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, regInfo.Standalone, expectedStandalone, assertMessage) - } - - testPing(makeIndex("/v1/"), true, "Expected standalone to be true (default)") - testPing(makeHTTPSIndex("/v1/"), true, "Expected standalone to be true (default)") - testPing(makePublicIndex(), false, "Expected standalone to be false for public index") -} - -func TestEndpoint(t *testing.T) { - skip.If(t, os.Getuid() != 0, "skipping test that requires root") - // Simple wrapper to fail test if err != nil - expandEndpoint := func(index *registry.IndexInfo) *v1Endpoint { - endpoint, err := newV1Endpoint(index, "", nil) - if err != nil { - t.Fatal(err) - } - return endpoint - } - - assertInsecureIndex := func(index *registry.IndexInfo) { - index.Secure = true - _, err := newV1Endpoint(index, "", nil) - assert.ErrorContains(t, err, "insecure-registry", index.Name+": Expected insecure-registry error for insecure index") - index.Secure = false - } - - assertSecureIndex := func(index *registry.IndexInfo) { - index.Secure = true - _, err := newV1Endpoint(index, "", nil) - assert.ErrorContains(t, err, "certificate signed by unknown authority", index.Name+": Expected cert error for secure index") - index.Secure = false - } - - index := ®istry.IndexInfo{} - index.Name = makeURL("/v1/") - endpoint := expandEndpoint(index) - assert.Equal(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) - assertInsecureIndex(index) - - index.Name = makeURL("") - endpoint = expandEndpoint(index) - assert.Equal(t, endpoint.String(), index.Name+"/v1/", 
index.Name+": Expected endpoint to be "+index.Name+"/v1/") - assertInsecureIndex(index) - - httpURL := makeURL("") - index.Name = strings.SplitN(httpURL, "://", 2)[1] - endpoint = expandEndpoint(index) - assert.Equal(t, endpoint.String(), httpURL+"/v1/", index.Name+": Expected endpoint to be "+httpURL+"/v1/") - assertInsecureIndex(index) - - index.Name = makeHTTPSURL("/v1/") - endpoint = expandEndpoint(index) - assert.Equal(t, endpoint.String(), index.Name, "Expected endpoint to be "+index.Name) - assertSecureIndex(index) - - index.Name = makeHTTPSURL("") - endpoint = expandEndpoint(index) - assert.Equal(t, endpoint.String(), index.Name+"/v1/", index.Name+": Expected endpoint to be "+index.Name+"/v1/") - assertSecureIndex(index) - - httpsURL := makeHTTPSURL("") - index.Name = strings.SplitN(httpsURL, "://", 2)[1] - endpoint = expandEndpoint(index) - assert.Equal(t, endpoint.String(), httpsURL+"/v1/", index.Name+": Expected endpoint to be "+httpsURL+"/v1/") - assertSecureIndex(index) - - badEndpoints := []string{ - "http://127.0.0.1/v1/", - "https://127.0.0.1/v1/", - "http://127.0.0.1", - "https://127.0.0.1", - "127.0.0.1", - } - for _, address := range badEndpoints { - index.Name = address - _, err := newV1Endpoint(index, "", nil) - assert.Check(t, err != nil, "Expected error while expanding bad endpoint: %s", address) - } -} - func TestParseRepositoryInfo(t *testing.T) { type staticRepositoryInfo struct { Index *registry.IndexInfo From e666f99956f3835d4f08bc309a4405e0894ee8db Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Thu, 25 Aug 2022 09:59:30 +0200 Subject: [PATCH 68/90] registry: rename v1-endpoint tests to have a common prefix Signed-off-by: Sebastiaan van Stijn --- registry/endpoint_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/registry/endpoint_test.go b/registry/endpoint_test.go index 53bdfc788fc9d..a4d689a4627db 100644 --- a/registry/endpoint_test.go +++ b/registry/endpoint_test.go @@ -13,7 +13,7 @@ 
import ( "gotest.tools/v3/skip" ) -func TestPingRegistryEndpoint(t *testing.T) { +func TestV1EndpointPing(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") testPing := func(index *registry.IndexInfo, expectedStandalone bool, assertMessage string) { ep, err := newV1Endpoint(index, "", nil) @@ -33,7 +33,7 @@ func TestPingRegistryEndpoint(t *testing.T) { testPing(makePublicIndex(), false, "Expected standalone to be false for public index") } -func TestEndpoint(t *testing.T) { +func TestV1Endpoint(t *testing.T) { skip.If(t, os.Getuid() != 0, "skipping test that requires root") // Simple wrapper to fail test if err != nil expandEndpoint := func(index *registry.IndexInfo) *v1Endpoint { @@ -105,7 +105,7 @@ func TestEndpoint(t *testing.T) { } } -func TestEndpointParse(t *testing.T) { +func TestV1EndpointParse(t *testing.T) { testData := []struct { str string expected string @@ -132,7 +132,7 @@ func TestEndpointParse(t *testing.T) { } } -func TestEndpointParseInvalid(t *testing.T) { +func TestV1EndpointParseInvalid(t *testing.T) { testData := []string{ "http://0.0.0.0:5000/v2/", } @@ -146,7 +146,7 @@ func TestEndpointParseInvalid(t *testing.T) { // Ensure that a registry endpoint that responds with a 401 only is determined // to be a valid v1 registry endpoint -func TestValidateEndpoint(t *testing.T) { +func TestV1EndpointValidate(t *testing.T) { requireBasicAuthHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Header().Add("WWW-Authenticate", `Basic realm="localhost"`) w.WriteHeader(http.StatusUnauthorized) From 5fa0225615d3fdc069e847b6806852416685b26a Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Thu, 25 Aug 2022 10:13:03 +0200 Subject: [PATCH 69/90] registry: move search-related code to separate files Also touching-up some comments. 
Signed-off-by: Sebastiaan van Stijn --- registry/auth.go | 6 +- registry/config.go | 28 ++----- registry/registry_test.go | 14 ---- registry/search.go | 19 +++++ .../{endpoint_v1.go => search_endpoint_v1.go} | 6 +- ...int_test.go => search_endpoint_v1_test.go} | 0 registry/search_service.go | 83 +++++++++++++++++++ registry/{session.go => search_session.go} | 0 registry/search_test.go | 54 ++++++++++++ registry/service.go | 74 ----------------- 10 files changed, 173 insertions(+), 111 deletions(-) create mode 100644 registry/search.go rename registry/{endpoint_v1.go => search_endpoint_v1.go} (97%) rename registry/{endpoint_test.go => search_endpoint_v1_test.go} (100%) create mode 100644 registry/search_service.go rename registry/{session.go => search_session.go} (100%) create mode 100644 registry/search_test.go diff --git a/registry/auth.go b/registry/auth.go index dd75a49f38db5..471968b7508a6 100644 --- a/registry/auth.go +++ b/registry/auth.go @@ -124,8 +124,10 @@ func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifi }, nil } -// ConvertToHostname converts a registry url which has http|https prepended -// to just an hostname. +// ConvertToHostname normalizes a registry URL which has http|https prepended +// to just its hostname. It is used to match credentials, which may be either +// stored as hostname or as hostname including scheme (in legacy configuration +// files). func ConvertToHostname(url string) string { stripped := url if strings.HasPrefix(url, "http://") { diff --git a/registry/config.go b/registry/config.go index 2766306ac2093..6e8c2f4545ca5 100644 --- a/registry/config.go +++ b/registry/config.go @@ -319,7 +319,8 @@ func isCIDRMatch(cidrs []*registry.NetIPNet, URLHost string) bool { return false } -// ValidateMirror validates an HTTP(S) registry mirror +// ValidateMirror validates an HTTP(S) registry mirror. It is used by the daemon +// to validate the daemon configuration. 
func ValidateMirror(val string) (string, error) { uri, err := url.Parse(val) if err != nil { @@ -339,7 +340,8 @@ func ValidateMirror(val string) (string, error) { return strings.TrimSuffix(val, "/") + "/", nil } -// ValidateIndexName validates an index name. +// ValidateIndexName validates an index name. It is used by the daemon to validate +// the daemon configuration. func ValidateIndexName(val string) (string, error) { // TODO: upstream this to check to reference package if val == "index.docker.io" { @@ -425,24 +427,10 @@ func newRepositoryInfo(config *serviceConfig, name reference.Named) (*Repository }, nil } -// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but -// lacks registry configuration. +// ParseRepositoryInfo performs the breakdown of a repository name into a +// RepositoryInfo, but lacks registry configuration. +// +// It is used by the Docker cli to interact with registry-related endpoints. func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { return newRepositoryInfo(emptyServiceConfig, reposName) } - -// ParseSearchIndexInfo will use repository name to get back an indexInfo. -// -// TODO(thaJeztah) this function is only used by the CLI, and used to get -// information of the registry (to provide credentials if needed). We should -// move this function (or equivalent) to the CLI, as it's doing too much just -// for that. 
-func ParseSearchIndexInfo(reposName string) (*registry.IndexInfo, error) { - indexName, _ := splitReposSearchTerm(reposName) - - indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) - if err != nil { - return nil, err - } - return indexInfo, nil -} diff --git a/registry/registry_test.go b/registry/registry_test.go index 592dcf7a50f24..2667bb9005ad8 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -419,20 +419,6 @@ func TestMirrorEndpointLookup(t *testing.T) { } } -func TestSearchRepositories(t *testing.T) { - r := spawnTestRegistrySession(t) - results, err := r.searchRepositories("fakequery", 25) - if err != nil { - t.Fatal(err) - } - if results == nil { - t.Fatal("Expected non-nil SearchResults object") - } - assert.Equal(t, results.NumResults, 1, "Expected 1 search results") - assert.Equal(t, results.Query, "fakequery", "Expected 'fakequery' as query") - assert.Equal(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") -} - func TestTrustedLocation(t *testing.T) { for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} { req, _ := http.NewRequest(http.MethodGet, url, nil) diff --git a/registry/search.go b/registry/search.go new file mode 100644 index 0000000000000..7c82c6605f2e4 --- /dev/null +++ b/registry/search.go @@ -0,0 +1,19 @@ +package registry + +import "github.com/docker/docker/api/types/registry" + +// ParseSearchIndexInfo will use repository name to get back an indexInfo. +// +// TODO(thaJeztah) this function is only used by the CLI, and used to get +// information of the registry (to provide credentials if needed). We should +// move this function (or equivalent) to the CLI, as it's doing too much just +// for that. 
+func ParseSearchIndexInfo(reposName string) (*registry.IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) + + indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) + if err != nil { + return nil, err + } + return indexInfo, nil +} diff --git a/registry/endpoint_v1.go b/registry/search_endpoint_v1.go similarity index 97% rename from registry/endpoint_v1.go rename to registry/search_endpoint_v1.go index c7e930c8ad91d..fbe02c2cf1d6d 100644 --- a/registry/endpoint_v1.go +++ b/registry/search_endpoint_v1.go @@ -41,7 +41,11 @@ func newV1Endpoint(index *registry.IndexInfo, userAgent string, metaHeaders http return nil, err } - endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) + indexURL := index.Name + if index.Official { + indexURL = IndexServer + } + endpoint, err := newV1EndpointFromStr(indexURL, tlsConfig, userAgent, metaHeaders) if err != nil { return nil, err } diff --git a/registry/endpoint_test.go b/registry/search_endpoint_v1_test.go similarity index 100% rename from registry/endpoint_test.go rename to registry/search_endpoint_v1_test.go diff --git a/registry/search_service.go b/registry/search_service.go new file mode 100644 index 0000000000000..2d47986e7b8b1 --- /dev/null +++ b/registry/search_service.go @@ -0,0 +1,83 @@ +package registry + +import ( + "context" + "net/http" + "strings" + + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/api/types/registry" + "github.com/sirupsen/logrus" +) + +// Search queries the public registry for images matching the specified +// search terms, and returns the results. 
+func (s *defaultService) Search(_ context.Context, term string, limit int, authConfig *registry.AuthConfig, userAgent string, headers map[string][]string) (*registry.SearchResults, error) { + // TODO Use ctx when searching for repositories + if hasScheme(term) { + return nil, invalidParamf("invalid repository name: repository name (%s) should not have a scheme", term) + } + + indexName, remoteName := splitReposSearchTerm(term) + + // Search is a long-running operation, just lock s.config to avoid block others. + s.mu.RLock() + index, err := newIndexInfo(s.config, indexName) + s.mu.RUnlock() + + if err != nil { + return nil, err + } + if index.Official { + // If pull "library/foo", it's stored locally under "foo" + remoteName = strings.TrimPrefix(remoteName, "library/") + } + + endpoint, err := newV1Endpoint(index, userAgent, headers) + if err != nil { + return nil, err + } + + var client *http.Client + if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { + creds := NewStaticCredentialStore(authConfig) + scopes := []auth.Scope{ + auth.RegistryScope{ + Name: "catalog", + Actions: []string{"search"}, + }, + } + + modifiers := Headers(userAgent, nil) + v2Client, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) + if err != nil { + return nil, err + } + // Copy non transport http client features + v2Client.Timeout = endpoint.client.Timeout + v2Client.CheckRedirect = endpoint.client.CheckRedirect + v2Client.Jar = endpoint.client.Jar + + logrus.Debugf("using v2 client for search to %s", endpoint.URL) + client = v2Client + } else { + client = endpoint.client + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + } + + return newSession(client, endpoint).searchRepositories(remoteName, limit) +} + +// splitReposSearchTerm breaks a search term into an index name and remote name +func splitReposSearchTerm(reposName string) (string, string) { + nameParts := 
strings.SplitN(reposName, "/", 2) + if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && + !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { + // This is a Docker Hub repository (ex: samalba/hipache or ubuntu), + // use the default Docker Hub registry (docker.io) + return IndexName, reposName + } + return nameParts[0], nameParts[1] +} diff --git a/registry/session.go b/registry/search_session.go similarity index 100% rename from registry/session.go rename to registry/search_session.go diff --git a/registry/search_test.go b/registry/search_test.go new file mode 100644 index 0000000000000..fbbae9180342f --- /dev/null +++ b/registry/search_test.go @@ -0,0 +1,54 @@ +package registry + +import ( + "net/http" + "testing" + + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/docker/api/types/registry" + "gotest.tools/v3/assert" +) + +func TestSearchRepositories(t *testing.T) { + r := spawnTestRegistrySession(t) + results, err := r.searchRepositories("fakequery", 25) + if err != nil { + t.Fatal(err) + } + if results == nil { + t.Fatal("Expected non-nil SearchResults object") + } + assert.Equal(t, results.NumResults, 1, "Expected 1 search results") + assert.Equal(t, results.Query, "fakequery", "Expected 'fakequery' as query") + assert.Equal(t, results.Results[0].StarCount, 42, "Expected 'fakeimage' to have 42 stars") +} + +func spawnTestRegistrySession(t *testing.T) *session { + authConfig := ®istry.AuthConfig{} + endpoint, err := newV1Endpoint(makeIndex("/v1/"), "", nil) + if err != nil { + t.Fatal(err) + } + userAgent := "docker test client" + var tr http.RoundTripper = debugTransport{newTransport(nil), t.Log} + tr = transport.NewTransport(newAuthTransport(tr, authConfig, false), Headers(userAgent, nil)...) 
+ client := httpClient(tr) + + if err := authorizeClient(client, authConfig, endpoint); err != nil { + t.Fatal(err) + } + r := newSession(client, endpoint) + + // In a normal scenario for the v1 registry, the client should send a `X-Docker-Token: true` + // header while authenticating, in order to retrieve a token that can be later used to + // perform authenticated actions. + // + // The mock v1 registry does not support that, (TODO(tiborvass): support it), instead, + // it will consider authenticated any request with the header `X-Docker-Token: fake-token`. + // + // Because we know that the client's transport is an `*authTransport` we simply cast it, + // in order to set the internal cached token to the fake token, and thus send that fake token + // upon every subsequent requests. + r.client.Transport.(*authTransport).token = []string{"fake-token"} + return r +} diff --git a/registry/service.go b/registry/service.go index a4453bb17ac7c..f3e688c134861 100644 --- a/registry/service.go +++ b/registry/service.go @@ -3,13 +3,11 @@ package registry // import "github.com/docker/docker/registry" import ( "context" "crypto/tls" - "net/http" "net/url" "strings" "sync" "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client/auth" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" "github.com/sirupsen/logrus" @@ -116,78 +114,6 @@ func (s *defaultService) Auth(ctx context.Context, authConfig *registry.AuthConf return "", "", err } -// splitReposSearchTerm breaks a search term into an index name and remote name -func splitReposSearchTerm(reposName string) (string, string) { - nameParts := strings.SplitN(reposName, "/", 2) - if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") && - !strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") { - // This is a Docker Hub repository (ex: samalba/hipache or ubuntu), - // use the default Docker Hub registry (docker.io) - return IndexName, 
reposName - } - return nameParts[0], nameParts[1] -} - -// Search queries the public registry for images matching the specified -// search terms, and returns the results. -func (s *defaultService) Search(ctx context.Context, term string, limit int, authConfig *registry.AuthConfig, userAgent string, headers map[string][]string) (*registry.SearchResults, error) { - // TODO Use ctx when searching for repositories - if hasScheme(term) { - return nil, invalidParamf("invalid repository name: repository name (%s) should not have a scheme", term) - } - - indexName, remoteName := splitReposSearchTerm(term) - - // Search is a long-running operation, just lock s.config to avoid block others. - s.mu.RLock() - index, err := newIndexInfo(s.config, indexName) - s.mu.RUnlock() - - if err != nil { - return nil, err - } - if index.Official { - // If pull "library/foo", it's stored locally under "foo" - remoteName = strings.TrimPrefix(remoteName, "library/") - } - - endpoint, err := newV1Endpoint(index, userAgent, headers) - if err != nil { - return nil, err - } - - var client *http.Client - if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" { - creds := NewStaticCredentialStore(authConfig) - scopes := []auth.Scope{ - auth.RegistryScope{ - Name: "catalog", - Actions: []string{"search"}, - }, - } - - modifiers := Headers(userAgent, nil) - v2Client, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes) - if err != nil { - return nil, err - } - // Copy non transport http client features - v2Client.Timeout = endpoint.client.Timeout - v2Client.CheckRedirect = endpoint.client.CheckRedirect - v2Client.Jar = endpoint.client.Jar - - logrus.Debugf("using v2 client for search to %s", endpoint.URL) - client = v2Client - } else { - client = endpoint.client - if err := authorizeClient(client, authConfig, endpoint); err != nil { - return nil, err - } - } - - return newSession(client, endpoint).searchRepositories(remoteName, 
limit) -} - // ResolveRepository splits a repository name into its components // and configuration of the associated registry. func (s *defaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { From 9bcec34562dd608bf59331b178f67ed3cac0ff49 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Fri, 11 Mar 2022 16:36:06 +0100 Subject: [PATCH 70/90] daemon: split image search to a separate service This extracts the image search to its own service, as searching a registry for images does not depend on functionality of the image service. Signed-off-by: Sebastiaan van Stijn --- api/server/router/image/backend.go | 8 +++- api/server/router/image/image.go | 4 +- api/server/router/image/image_routes.go | 7 ++- api/types/registry/registry.go | 16 +++++++ cmd/dockerd/daemon.go | 1 + daemon/containerd/image_search.go | 18 -------- daemon/daemon.go | 12 ++++++ daemon/image_service.go | 1 - daemon/search/errors.go | 18 ++++++++ .../image_search.go => search/service.go} | 43 +++++++++++++------ .../service_test.go} | 24 ++++++----- registry/search_service.go | 27 +++++++++++- registry/service.go | 1 - 13 files changed, 131 insertions(+), 49 deletions(-) delete mode 100644 daemon/containerd/image_search.go create mode 100644 daemon/search/errors.go rename daemon/{images/image_search.go => search/service.go} (57%) rename daemon/{images/image_search_test.go => search/service_test.go} (93%) diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go index 84ea9da4cd5b7..40e2a21ae656d 100644 --- a/api/server/router/image/backend.go +++ b/api/server/router/image/backend.go @@ -38,5 +38,11 @@ type importExportBackend interface { type registryBackend interface { PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig 
*registry.AuthConfig, outStream io.Writer) error - SearchRegistryForImages(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) +} + +// SearchBackend provides the backend to search registries for images. +type SearchBackend interface { + // SearchImages queries the registry for images matching the given term and + // options. + SearchImages(ctx context.Context, term string, opts registry.SearchOpts) (*registry.SearchResults, error) } diff --git a/api/server/router/image/image.go b/api/server/router/image/image.go index e7ab7f0b644ae..f08faeef38121 100644 --- a/api/server/router/image/image.go +++ b/api/server/router/image/image.go @@ -10,6 +10,7 @@ import ( // imageRouter is a router to talk with the image controller type imageRouter struct { backend Backend + searchBackend SearchBackend referenceBackend reference.Store imageStore image.Store layerStore layer.Store @@ -17,9 +18,10 @@ type imageRouter struct { } // NewRouter initializes a new image router -func NewRouter(backend Backend, referenceBackend reference.Store, imageStore image.Store, layerStore layer.Store) router.Router { +func NewRouter(backend Backend, searchBackend SearchBackend, referenceBackend reference.Store, imageStore image.Store, layerStore layer.Store) router.Router { r := &imageRouter{ backend: backend, + searchBackend: searchBackend, referenceBackend: referenceBackend, imageStore: imageStore, layerStore: layerStore, diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go index cd6b2fe96a39a..37140401cfeb4 100644 --- a/api/server/router/image/image_routes.go +++ b/api/server/router/image/image_routes.go @@ -349,7 +349,12 @@ func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter // For a search it is not an error if no auth was given. Ignore invalid // AuthConfig to increase compatibility with the existing API. 
authConfig, _ := registry.DecodeAuthConfig(r.Header.Get(registry.AuthHeader)) - query, err := s.backend.SearchRegistryForImages(ctx, searchFilters, r.Form.Get("term"), limit, authConfig, headers) + query, err := s.searchBackend.SearchImages(ctx, r.Form.Get("term"), registry.SearchOpts{ + Filters: searchFilters, + Limit: limit, + AuthConfig: authConfig, + Headers: headers, + }) if err != nil { return err } diff --git a/api/types/registry/registry.go b/api/types/registry/registry.go index 62a88f5be89d5..b57046a9bd7a0 100644 --- a/api/types/registry/registry.go +++ b/api/types/registry/registry.go @@ -4,6 +4,7 @@ import ( "encoding/json" "net" + "github.com/docker/docker/api/types/filters" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -84,6 +85,21 @@ type IndexInfo struct { Official bool } +// SearchOpts holds parameters to search a registry for images. +type SearchOpts struct { + // Filters to apply to the search results. + Filters filters.Args + // Limit limits the number of images to return. If not set, a default + // limit is set by the search service. + Limit int + // AuthConfig holds authentication credentials for authenticating with + // the registry. + AuthConfig *AuthConfig + // Headers contains custom "X-Meta-" prefixed meta-headers to include + // in registry requests. 
+ Headers map[string][]string +} + // SearchResult describes a search result returned from a registry type SearchResult struct { // StarCount indicates the number of stars this repository has diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go index 28de28af43144..96f4d356a7f7c 100644 --- a/cmd/dockerd/daemon.go +++ b/cmd/dockerd/daemon.go @@ -531,6 +531,7 @@ func initRouter(opts routerOptions) { container.NewRouter(opts.daemon, decoder, opts.daemon.RawSysInfo().CgroupUnified), image.NewRouter( opts.daemon.ImageService(), + opts.daemon.SearchService(), opts.daemon.ReferenceStore, opts.daemon.ImageService().DistributionServices().ImageStore, opts.daemon.ImageService().DistributionServices().LayerStore, diff --git a/daemon/containerd/image_search.go b/daemon/containerd/image_search.go deleted file mode 100644 index bccb44417e23d..0000000000000 --- a/daemon/containerd/image_search.go +++ /dev/null @@ -1,18 +0,0 @@ -package containerd - -import ( - "context" - "errors" - - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/registry" -) - -// SearchRegistryForImages queries the registry for images matching -// term. authConfig is used to login. -// -// TODO: this could be implemented in a registry service instead of the image -// service. 
-func (i *ImageService) SearchRegistryForImages(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) { - return nil, errors.New("not implemented") -} diff --git a/daemon/daemon.go b/daemon/daemon.go index 27169499ee716..64e3b4e710c60 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -36,6 +36,7 @@ import ( "github.com/docker/docker/daemon/images" "github.com/docker/docker/daemon/logger" "github.com/docker/docker/daemon/network" + "github.com/docker/docker/daemon/search" "github.com/docker/docker/daemon/stats" dmetadata "github.com/docker/docker/distribution/metadata" "github.com/docker/docker/dockerversion" @@ -78,6 +79,7 @@ type Daemon struct { containersReplica container.ViewDB execCommands *exec.Store imageService ImageService + searchService *search.Service configStore *config.Config statsCollector *stats.Collector defaultLogConfig containertypes.LogConfig @@ -851,6 +853,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S } } + d.searchService, err = search.NewService(registry.SearchServiceOptions{ServiceOptions: config.ServiceOptions}) + if err != nil { + return nil, err + } + d.registryService = registryService logger.RegisterPluginGetter(d.PluginStore) @@ -1463,6 +1470,11 @@ func (daemon *Daemon) ImageService() ImageService { return daemon.imageService } +// SearchService returns the backend used for searching images. 
+func (daemon *Daemon) SearchService() *search.Service { + return daemon.searchService +} + // BuilderBackend returns the backend used by builder func (daemon *Daemon) BuilderBackend() builder.Backend { return struct { diff --git a/daemon/image_service.go b/daemon/image_service.go index 01683d33123b6..50e583af75f2a 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -69,7 +69,6 @@ type ImageService interface { // Other GetRepository(ctx context.Context, ref reference.Named, authConfig *registry.AuthConfig) (distribution.Repository, error) - SearchRegistryForImages(ctx context.Context, searchFilters filters.Args, term string, limit int, authConfig *registry.AuthConfig, headers map[string][]string) (*registry.SearchResults, error) DistributionServices() images.DistributionServices Children(id image.ID) []image.ID Cleanup() error diff --git a/daemon/search/errors.go b/daemon/search/errors.go new file mode 100644 index 0000000000000..cddef218e3139 --- /dev/null +++ b/daemon/search/errors.go @@ -0,0 +1,18 @@ +package search + +import "fmt" + +type invalidFilter struct { + filter string + value interface{} +} + +func (e invalidFilter) Error() string { + msg := "invalid filter '" + e.filter + if e.value != nil { + msg += fmt.Sprintf("=%s", e.value) + } + return msg + "'" +} + +func (e invalidFilter) InvalidParameter() {} diff --git a/daemon/images/image_search.go b/daemon/search/service.go similarity index 57% rename from daemon/images/image_search.go rename to daemon/search/service.go index 86b897964c5af..0e17847f7f7b8 100644 --- a/daemon/images/image_search.go +++ b/daemon/search/service.go @@ -1,28 +1,44 @@ -package images // import "github.com/docker/docker/daemon/images" +package search // import "github.com/docker/docker/daemon/search" import ( "context" "strconv" - "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/dockerversion" + registrypkg "github.com/docker/docker/registry" ) 
+// registrySearch provides functions to search a registry, using the registry V1 search API. +type registrySearch interface { + Search(ctx context.Context, term string, limit int, authConfig *registry.AuthConfig, userAgent string, header map[string][]string) (*registry.SearchResults, error) +} + var acceptedSearchFilterTags = map[string]bool{ "is-automated": true, "is-official": true, "stars": true, } -// SearchRegistryForImages queries the registry for images matching -// term. authConfig is used to login. -// -// TODO: this could be implemented in a registry service instead of the image -// service. -func (i *ImageService) SearchRegistryForImages(ctx context.Context, searchFilters filters.Args, term string, limit int, - authConfig *registry.AuthConfig, - headers map[string][]string) (*registry.SearchResults, error) { +// Service provides the backend to search registries for images. +type Service struct { + registrySearch registrySearch +} + +// NewService initializes a new Service to search registries for images. +func NewService(opts registrypkg.SearchServiceOptions) (*Service, error) { + registrySearch, err := registrypkg.NewSearchService(opts) + if err != nil { + return nil, err + } + + return &Service{registrySearch: registrySearch}, nil +} + +// SearchImages queries the registry for images matching the given term and +// options. 
+func (i *Service) SearchImages(ctx context.Context, term string, opts registry.SearchOpts) (*registry.SearchResults, error) { + searchFilters := opts.Filters if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { return nil, err @@ -45,8 +61,7 @@ func (i *ImageService) SearchRegistryForImages(ctx context.Context, searchFilter } } if searchFilters.Contains("stars") { - hasStars := searchFilters.Get("stars") - for _, hasStar := range hasStars { + for _, hasStar := range searchFilters.Get("stars") { iHasStar, err := strconv.Atoi(hasStar) if err != nil { return nil, invalidFilter{"stars", hasStar} @@ -57,12 +72,12 @@ func (i *ImageService) SearchRegistryForImages(ctx context.Context, searchFilter } } - unfilteredResult, err := i.registryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) + unfilteredResult, err := i.registrySearch.Search(ctx, term, opts.Limit, opts.AuthConfig, dockerversion.DockerUserAgent(ctx), opts.Headers) if err != nil { return nil, err } - filteredResults := []registry.SearchResult{} + filteredResults := make([]registry.SearchResult, 0, len(unfilteredResult.Results)) for _, result := range unfilteredResult.Results { if searchFilters.Contains("is-automated") { if isAutomated != result.IsAutomated { diff --git a/daemon/images/image_search_test.go b/daemon/search/service_test.go similarity index 93% rename from daemon/images/image_search_test.go rename to daemon/search/service_test.go index 115793d7190bf..92b2d7509bf40 100644 --- a/daemon/images/image_search_test.go +++ b/daemon/search/service_test.go @@ -1,4 +1,4 @@ -package images // import "github.com/docker/docker/daemon/images" +package search // import "github.com/docker/docker/daemon/search" import ( "context" @@ -8,12 +8,10 @@ import ( "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" - registrypkg "github.com/docker/docker/registry" "gotest.tools/v3/assert" ) 
type fakeService struct { - registrypkg.Service shouldReturnError bool term string @@ -31,7 +29,7 @@ func (s *fakeService) Search(ctx context.Context, term string, limit int, authCo }, nil } -func TestSearchRegistryForImagesErrors(t *testing.T) { +func TestSearchImagesErrors(t *testing.T) { errorCases := []struct { filtersArgs filters.Args shouldReturnError bool @@ -82,12 +80,14 @@ func TestSearchRegistryForImagesErrors(t *testing.T) { for _, tc := range errorCases { tc := tc t.Run(tc.expectedError, func(t *testing.T) { - daemon := &ImageService{ - registryService: &fakeService{ + service := &Service{ + registrySearch: &fakeService{ shouldReturnError: tc.shouldReturnError, }, } - _, err := daemon.SearchRegistryForImages(context.Background(), tc.filtersArgs, "term", 0, nil, map[string][]string{}) + _, err := service.SearchImages(context.Background(), "term", registry.SearchOpts{ + Filters: tc.filtersArgs, + }) assert.ErrorContains(t, err, tc.expectedError) if tc.shouldReturnError { assert.Check(t, errdefs.IsUnknown(err), "got: %T: %v", err, err) @@ -98,7 +98,7 @@ func TestSearchRegistryForImagesErrors(t *testing.T) { } } -func TestSearchRegistryForImages(t *testing.T) { +func TestSearchImages(t *testing.T) { term := "term" successCases := []struct { name string @@ -348,13 +348,15 @@ func TestSearchRegistryForImages(t *testing.T) { for _, tc := range successCases { tc := tc t.Run(tc.name, func(t *testing.T) { - daemon := &ImageService{ - registryService: &fakeService{ + service := &Service{ + registrySearch: &fakeService{ term: term, results: tc.registryResults, }, } - results, err := daemon.SearchRegistryForImages(context.Background(), tc.filtersArgs, term, 0, nil, map[string][]string{}) + results, err := service.SearchImages(context.Background(), term, registry.SearchOpts{ + Filters: tc.filtersArgs, + }) assert.NilError(t, err) assert.Equal(t, results.Query, term) assert.Equal(t, results.NumResults, len(tc.expectedResults)) diff --git a/registry/search_service.go 
b/registry/search_service.go index 2d47986e7b8b1..01f7eb9399f41 100644 --- a/registry/search_service.go +++ b/registry/search_service.go @@ -4,15 +4,40 @@ import ( "context" "net/http" "strings" + "sync" "github.com/docker/distribution/registry/client/auth" "github.com/docker/docker/api/types/registry" "github.com/sirupsen/logrus" ) +// SearchServiceOptions holds configuration options for the search service, +// such as mirrors and insecure registries. It currently wraps ServiceOptions, +// but does not use the ServiceOptions.AllowNondistributableArtifacts field. +type SearchServiceOptions struct { + ServiceOptions +} + +// NewSearchService returns a new instance of SearchService ready to be +// installed into an engine. +func NewSearchService(options SearchServiceOptions) (*SearchService, error) { + config, err := newServiceConfig(options.ServiceOptions) + if err != nil { + return nil, err + } + return &SearchService{config: config}, err +} + +// SearchService is a service to search a registry. It tracks configuration data +// such as a list of mirrors. +type SearchService struct { + config *serviceConfig + mu sync.RWMutex +} + // Search queries the public registry for images matching the specified // search terms, and returns the results. 
-func (s *defaultService) Search(_ context.Context, term string, limit int, authConfig *registry.AuthConfig, userAgent string, headers map[string][]string) (*registry.SearchResults, error) { +func (s *SearchService) Search(_ context.Context, term string, limit int, authConfig *registry.AuthConfig, userAgent string, headers map[string][]string) (*registry.SearchResults, error) { // TODO Use ctx when searching for repositories if hasScheme(term) { return nil, invalidParamf("invalid repository name: repository name (%s) should not have a scheme", term) diff --git a/registry/service.go b/registry/service.go index f3e688c134861..8898ebc621f10 100644 --- a/registry/service.go +++ b/registry/service.go @@ -19,7 +19,6 @@ type Service interface { LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) ResolveRepository(name reference.Named) (*RepositoryInfo, error) - Search(ctx context.Context, term string, limit int, authConfig *registry.AuthConfig, userAgent string, headers map[string][]string) (*registry.SearchResults, error) ServiceConfig() *registry.ServiceConfig LoadAllowNondistributableArtifacts([]string) error LoadMirrors([]string) error From a41d2a5d872a2b578f0aa9c660d40b7dfd5870c1 Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Thu, 25 Aug 2022 14:44:59 +0200 Subject: [PATCH 71/90] registry: remove Service interface There's only a single implementation, so no need to return an interface. 
Signed-off-by: Sebastiaan van Stijn --- daemon/daemon.go | 2 +- daemon/images/service.go | 4 ++-- distribution/config.go | 2 +- registry/registry_test.go | 2 +- registry/service.go | 41 ++++++++++++++------------------------- registry/service_v2.go | 2 +- 6 files changed, 21 insertions(+), 32 deletions(-) diff --git a/daemon/daemon.go b/daemon/daemon.go index 64e3b4e710c60..e029bdc5b846a 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -83,7 +83,7 @@ type Daemon struct { configStore *config.Config statsCollector *stats.Collector defaultLogConfig containertypes.LogConfig - registryService registry.Service + registryService *registry.Service EventsService *events.Events netController libnetwork.NetworkController volumes *volumesservice.VolumesService diff --git a/daemon/images/service.go b/daemon/images/service.go index 09d5a3644dfc4..6e0c75356cc50 100644 --- a/daemon/images/service.go +++ b/daemon/images/service.go @@ -44,7 +44,7 @@ type ImageServiceConfig struct { MaxConcurrentUploads int MaxDownloadAttempts int ReferenceStore dockerreference.Store - RegistryService registry.Service + RegistryService *registry.Service TrustKey libtrust.PrivateKey ContentStore content.Store Leases leases.Manager @@ -85,7 +85,7 @@ type ImageService struct { layerStore layer.Store pruneRunning int32 referenceStore dockerreference.Store - registryService registry.Service + registryService *registry.Service trustKey libtrust.PrivateKey uploadManager *xfer.LayerUploadManager leases leases.Manager diff --git a/distribution/config.go b/distribution/config.go index a00392199fe0b..36f726036a63f 100644 --- a/distribution/config.go +++ b/distribution/config.go @@ -36,7 +36,7 @@ type Config struct { ProgressOutput progress.Output // RegistryService is the registry service to use for TLS configuration // and endpoint lookup. 
- RegistryService registrypkg.Service + RegistryService *registrypkg.Service // ImageEventLogger notifies events for a given image ImageEventLogger func(id, name, action string) // MetadataStore is the storage backend for distribution-specific diff --git a/registry/registry_test.go b/registry/registry_test.go index 2667bb9005ad8..7b5fed2a4b130 100644 --- a/registry/registry_test.go +++ b/registry/registry_test.go @@ -396,7 +396,7 @@ func TestMirrorEndpointLookup(t *testing.T) { if err != nil { t.Fatal(err) } - s := defaultService{config: cfg} + s := Service{config: cfg} imageName, err := reference.WithName(IndexName + "/test/image") if err != nil { diff --git a/registry/service.go b/registry/service.go index 8898ebc621f10..0415a48a7704c 100644 --- a/registry/service.go +++ b/registry/service.go @@ -13,42 +13,31 @@ import ( "github.com/sirupsen/logrus" ) -// Service is the interface defining what a registry service should implement. -type Service interface { - Auth(ctx context.Context, authConfig *registry.AuthConfig, userAgent string) (status, token string, err error) - LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) - LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) - ResolveRepository(name reference.Named) (*RepositoryInfo, error) - ServiceConfig() *registry.ServiceConfig - LoadAllowNondistributableArtifacts([]string) error - LoadMirrors([]string) error - LoadInsecureRegistries([]string) error -} - -// defaultService is a registry service. It tracks configuration data such as a list +// Service is a registry service. It tracks configuration data such as a list // of mirrors. -type defaultService struct { +type Service struct { config *serviceConfig mu sync.RWMutex } -// NewService returns a new instance of defaultService ready to be +// NewService returns a new instance of Service ready to be // installed into an engine. 
-func NewService(options ServiceOptions) (Service, error) { +func NewService(options ServiceOptions) (*Service, error) { config, err := newServiceConfig(options) - return &defaultService{config: config}, err + return &Service{config: config}, err } // ServiceConfig returns a copy of the public registry service's configuration. -func (s *defaultService) ServiceConfig() *registry.ServiceConfig { +func (s *Service) ServiceConfig() *registry.ServiceConfig { s.mu.RLock() defer s.mu.RUnlock() return s.config.copy() } -// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. -func (s *defaultService) LoadAllowNondistributableArtifacts(registries []string) error { +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts +// registries for Service. +func (s *Service) LoadAllowNondistributableArtifacts(registries []string) error { s.mu.Lock() defer s.mu.Unlock() @@ -56,7 +45,7 @@ func (s *defaultService) LoadAllowNondistributableArtifacts(registries []string) } // LoadMirrors loads registry mirrors for Service -func (s *defaultService) LoadMirrors(mirrors []string) error { +func (s *Service) LoadMirrors(mirrors []string) error { s.mu.Lock() defer s.mu.Unlock() @@ -64,7 +53,7 @@ func (s *defaultService) LoadMirrors(mirrors []string) error { } // LoadInsecureRegistries loads insecure registries for Service -func (s *defaultService) LoadInsecureRegistries(registries []string) error { +func (s *Service) LoadInsecureRegistries(registries []string) error { s.mu.Lock() defer s.mu.Unlock() @@ -74,7 +63,7 @@ func (s *defaultService) LoadInsecureRegistries(registries []string) error { // Auth contacts the public registry with the provided credentials, // and returns OK if authentication was successful. // It can be used to verify the validity of a client's credentials. 
-func (s *defaultService) Auth(ctx context.Context, authConfig *registry.AuthConfig, userAgent string) (status, token string, err error) { +func (s *Service) Auth(ctx context.Context, authConfig *registry.AuthConfig, userAgent string) (status, token string, err error) { // TODO Use ctx when searching for repositories var registryHostName = IndexHostname @@ -115,7 +104,7 @@ func (s *defaultService) Auth(ctx context.Context, authConfig *registry.AuthConf // ResolveRepository splits a repository name into its components // and configuration of the associated registry. -func (s *defaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { +func (s *Service) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { s.mu.RLock() defer s.mu.RUnlock() return newRepositoryInfo(s.config, name) @@ -134,7 +123,7 @@ type APIEndpoint struct { // LookupPullEndpoints creates a list of v2 endpoints to try to pull from, in order of preference. // It gives preference to mirrors over the actual registry, and HTTPS over plain HTTP. -func (s *defaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { +func (s *Service) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { s.mu.RLock() defer s.mu.RUnlock() @@ -143,7 +132,7 @@ func (s *defaultService) LookupPullEndpoints(hostname string) (endpoints []APIEn // LookupPushEndpoints creates a list of v2 endpoints to try to push to, in order of preference. // It gives preference to HTTPS over plain HTTP. Mirrors are not included. 
-func (s *defaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { +func (s *Service) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { s.mu.RLock() defer s.mu.RUnlock() diff --git a/registry/service_v2.go b/registry/service_v2.go index f147af0faaa7b..9eba86b4e9e05 100644 --- a/registry/service_v2.go +++ b/registry/service_v2.go @@ -7,7 +7,7 @@ import ( "github.com/docker/go-connections/tlsconfig" ) -func (s *defaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { +func (s *Service) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { if hostname == DefaultNamespace || hostname == IndexHostname { for _, mirror := range s.config.Mirrors { if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { From cf0b3d6e76d8fe5d9751ffc27d300cfddb2bfacf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Wed, 24 Aug 2022 12:02:54 +0200 Subject: [PATCH 72/90] c8d/push: Push lazy blobs with distribution source labels MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This makes it possible to push multi-platform images which contents are missing from the local content store, but can be fetched from a source repository. 
Co-authored-by: Djordje Lukic Signed-off-by: Paweł Gronowski --- daemon/containerd/image_push.go | 253 ++++++++++++++++++++++++++++++-- daemon/containerd/store.go | 87 +++++++++++ 2 files changed, 325 insertions(+), 15 deletions(-) create mode 100644 daemon/containerd/store.go diff --git a/daemon/containerd/image_push.go b/daemon/containerd/image_push.go index 2a1b3451871ce..24e19a1898dd6 100644 --- a/daemon/containerd/image_push.go +++ b/daemon/containerd/image_push.go @@ -2,24 +2,31 @@ package containerd import ( "context" + "encoding/json" "io" + "strings" - "github.com/containerd/containerd" + "github.com/containerd/containerd/content" + cerrdefs "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" containerdimages "github.com/containerd/containerd/images" "github.com/containerd/containerd/images/converter" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" + "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" + "golang.org/x/sync/semaphore" ) // PushImage initiates a push operation on the repository named localName. func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error { // TODO: Pass this from user? 
- platformMatcher := platforms.DefaultStrict() + platformMatcher := platforms.All ref, err := reference.ParseNormalizedNamed(image) if err != nil { @@ -59,18 +66,21 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea jobs := newJobs() imageHandler := containerdimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { - logrus.WithField("desc", desc).Debug("Pushing") + logrus.WithField("digest", desc.Digest.String()). + WithField("mediaType", desc.MediaType). + Debug("Pushing") if desc.MediaType != containerdimages.MediaTypeDockerSchema1Manifest { children, err := containerdimages.Children(ctx, store, desc) - - if err == nil { - for _, c := range children { - jobs.Add(c) - } + if err != nil { + return nil, err + } + for _, c := range children { + jobs.Add(c) } jobs.Add(desc) } + return nil, nil }) imageHandler = remotes.SkipNonDistributableBlobs(imageHandler) @@ -80,12 +90,225 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea finishProgress := showProgress(ctx, jobs, outStream, pushProgress(tracker)) defer finishProgress() - logrus.WithField("desc", target).WithField("ref", ref.String()).Info("Pushing desc to remote ref") - err = i.client.Push(ctx, ref.String(), target, - containerd.WithResolver(resolver), - containerd.WithPlatformMatcher(platformMatcher), - containerd.WithImageHandler(imageHandler), - ) + return lazyPush(ctx, store, ref.String(), target, resolver, imageHandler) +} + +// lazyPush uploads the provided content to a remote resource. It also attempts to +// handle push of content, which is not present locally in the store. 
+func lazyPush(ctx context.Context, store content.Store, ref string, desc ocispec.Descriptor, resolver remotes.Resolver, imagesHandler containerdimages.HandlerFunc) error { + // Annotate ref with digest to push only push tag for single digest + if !strings.Contains(ref, "@") { + ref = ref + "@" + desc.Digest.String() + } + + pusher, err := resolver.Pusher(ctx, ref) + if err != nil { + return err + } + + wrapper := func(h images.Handler) images.Handler { + return images.Handlers(imagesHandler, h) + } + + sources, err := collectSources(ctx, desc, store) + if err != nil { + return err + } + + lazyStore := newLazyContentStore(store, sources) + + var limiter *semaphore.Weighted + return remotes.PushContent(ctx, pusher, desc, lazyStore, limiter, platforms.All, wrapper) +} + +func findLazyChildren(ctx context.Context, desc ocispec.Descriptor, store content.Store) ([]ocispec.Descriptor, error) { + // Collect to hashset to remove duplicates + set := map[string]ocispec.Descriptor{} + + // Do a breadth-first search starting from this descriptor + queue := []ocispec.Descriptor{desc} + for len(queue) > 0 { + child := queue[0] + queue = queue[1:] + + if containerdimages.IsNonDistributable(child.MediaType) { + continue + } + + _, err := store.ReaderAt(ctx, child) + if err != nil { + if cerrdefs.IsNotFound(err) { + set[child.Digest.String()] = child + continue + } + return nil, err + } + + newChildren, err := containerdimages.Children(ctx, store, child) + if err != nil { + return nil, err + } + + if len(newChildren) > 0 { + queue = append(queue, newChildren...) + } + } + + result := make([]ocispec.Descriptor, 0, len(set)) + for _, desc := range set { + result = append(result, desc) + logrus.WithField("digest", desc.Digest.String()). + WithField("mediaType", desc.MediaType). + Debug("lazy children found") + } + + return result, nil +} + +// peekNotJson does a small peek of the content to check if content is definitely not JSON. 
+// It returns true if content is definitely not JSON, or false if it was unable to detect if it's +// JSON or not. +func peekNotJson(ctx context.Context, store content.Store, desc ocispec.Descriptor) (bool, error) { + readerAt, err := store.ReaderAt(ctx, desc) + if err != nil { + logrus.WithError(err).WithField("digest", desc.Digest).Debug("failed to create reader to peek for json") + return false, err + } + + buffer := []byte{0} + n, err := readerAt.ReadAt(buffer, 0) + if n != 1 || err != nil { + logrus.WithError(err).WithField("digest", desc.Digest).Debug("failed to peek json") + return false, err + } + + // It doesn't start with {, then it's not a json. + return rune(buffer[0]) != '{', nil +} + +func collectSources(ctx context.Context, desc ocispec.Descriptor, store content.Store) (map[digest.Digest]distributionSource, error) { + lazyChildren, err := findLazyChildren(ctx, desc, store) + if err != nil { + logrus.WithField("digest", desc.Digest.String()). + WithField("mediaType", desc.MediaType). + WithError(err).Error("failed to find lazy children referenced by descriptor") + return nil, err + } + + sources := map[digest.Digest]distributionSource{} + + success := errors.New("success, found the source but can't return earlier without an error") + err = store.Walk(ctx, func(i content.Info) error { + source := extractDistributionSource(i.Labels) + + // Nah, we're looking for a parent of this lazy child. + // This one will not provide us with the source. + if source.value == "" { + return nil + } + + desc := ocispec.Descriptor{Digest: i.Digest} + + // Do a simple peek of the content to avoid big blobs which definitely aren't json. 
+ notJson, err := peekNotJson(ctx, store, desc) + if err != nil { + return err + } + if notJson { + logrus.WithField("digest", i.Digest).Debug("skipping, definitely not a json") + return nil + } + + // Read the manifest + blob, err := content.ReadBlob(ctx, store, desc) + if err != nil { + logrus.WithError(err).WithField("digest", i.Digest).Error("error reading blob") + return err + } + + // Manifests and indexes have different children. + // Index stores other manifests and manifests store layers. + // To avoid unmarshaling the blob separately as manifest and index + // this holds fields that contains them both and the media type. + var indexOrManifest struct { + MediaType string `json:"mediaType,omitempty"` + Manifests []ocispec.Descriptor `json:"manifests,omitempty"` + Layers []ocispec.Descriptor `json:"layers,omitempty"` + } + + err = json.Unmarshal(blob, &indexOrManifest) + if err != nil { + return nil + } + + mediaType := indexOrManifest.MediaType + // Just in case, check if it really is manifest or index. + if !containerdimages.IsManifestType(mediaType) && !containerdimages.IsIndexType(mediaType) { + return nil + } + if len(indexOrManifest.Layers) == 0 && len(indexOrManifest.Manifests) == 0 { + return nil + } + + // Look if this manifest/index specifies any of the lazy children + children := append(indexOrManifest.Layers, indexOrManifest.Manifests...) + for _, layer := range children { + for idx, wanted := range lazyChildren { + if layer.Digest == wanted.Digest { + // Found it! + sources[wanted.Digest] = source + + // Don't look for it anymore + if len(lazyChildren) > 1 { + lastIdx := len(lazyChildren) - 1 + lazyChildren[idx] = lazyChildren[lastIdx] + lazyChildren = lazyChildren[:lastIdx] + } else { + // We found all lazy children, let's end the walk. 
+ lazyChildren = lazyChildren[:0] + return success + } + } + } + } + + return nil + }) + + if err == success { + err = nil + } + if len(lazyChildren) > 0 { + msg := "missing blobs with no source: " + for idx, c := range lazyChildren { + if idx != 0 { + msg += ", " + } + msg += c.Digest.String() + } + err = errdefs.NotFound(errors.New(msg)) + } + + return sources, err +} + +func extractDistributionSource(labels map[string]string) distributionSource { + var source distributionSource + + // Check if this blob has a distributionSource label + // if yes, read it as source + for k, v := range labels { + if strings.HasPrefix(k, "containerd.io/distribution.source.") { + source.key = k + source.value = v + break + } + } + + return source +} - return err +type distributionSource struct { + key string + value string } diff --git a/daemon/containerd/store.go b/daemon/containerd/store.go new file mode 100644 index 0000000000000..9d2114794ee74 --- /dev/null +++ b/daemon/containerd/store.go @@ -0,0 +1,87 @@ +package containerd + +import ( + "context" + + "github.com/containerd/containerd/content" + cerrdefs "github.com/containerd/containerd/errdefs" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +type lazyContentStore struct { + s content.Store + sources map[digest.Digest]distributionSource +} + +func newLazyContentStore(s content.Store, sources map[digest.Digest]distributionSource) lazyContentStore { + return lazyContentStore{ + s: s, + sources: sources, + } +} + +// Delete implements content.Store +func (p lazyContentStore) Delete(ctx context.Context, dgst digest.Digest) error { + return p.s.Delete(ctx, dgst) +} + +// Info implements content.Store +func (p lazyContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + info, err := p.s.Info(ctx, dgst) + if err != nil { + if !cerrdefs.IsNotFound(err) { + return info, err + } + s, ok := p.sources[dgst] + if !ok { + 
return info, err + } + + logrus.WithField("digest", dgst).WithField("source", s).Debug("faking") + return content.Info{ + Digest: dgst, + Labels: map[string]string{ + s.key: s.value, + }, + }, nil + } + + return info, nil +} + +// Update implements content.Store +func (p lazyContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + return p.s.Update(ctx, info, fieldpaths...) +} + +// Walk implements content.Store +func (p lazyContentStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { + return p.s.Walk(ctx, fn, filters...) +} + +// ReaderAt implements content.Store +func (p lazyContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + return p.s.ReaderAt(ctx, desc) +} + +// Abort implements content.Store +func (p lazyContentStore) Abort(ctx context.Context, ref string) error { + return p.s.Abort(ctx, ref) +} + +// ListStatuses implements content.Store +func (p lazyContentStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { + return p.s.ListStatuses(ctx, filters...) +} + +// Status implements content.Store +func (p lazyContentStore) Status(ctx context.Context, ref string) (content.Status, error) { + return p.s.Status(ctx, ref) +} + +// Writer implements content.Store +func (p lazyContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + return p.s.Writer(ctx, opts...) +} From ceb484db2953697fb82a00fe7e23659a4ad67c5f Mon Sep 17 00:00:00 2001 From: Sebastiaan van Stijn Date: Thu, 25 Aug 2022 18:41:02 +0200 Subject: [PATCH 73/90] daemon: set containerd default snapshotter if none is configured This is a temporary workaround for the daemon not yet having automatic selection of snapshotters. 
Signed-off-by: Sebastiaan van Stijn --- daemon/daemon.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/daemon/daemon.go b/daemon/daemon.go index e029bdc5b846a..4d0150d432031 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -1004,6 +1004,11 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S } if d.UsesSnapshotter() { + // FIXME(thaJeztah): implement snapshotter-selection similar to automatic graph-driver selection + if driverName == "" || driverName == "overlay2" || driverName == "overlay" { + driverName = containerd.DefaultSnapshotter + } + // Configure and validate the kernels security support. Note this is a Linux/FreeBSD // operation only, so it is safe to pass *just* the runtime OS graphdriver. if err := configureKernelSecuritySupport(config, driverName); err != nil { From 180ed082fd5fbbb1e423eb0eb9d24acafc5792d7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Thu, 1 Sep 2022 12:58:36 +0200 Subject: [PATCH 74/90] c8d/progress: Don't use cancelled context MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/progress.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/daemon/containerd/progress.go b/daemon/containerd/progress.go index 81e039b6fe7a6..6b9bb1187cc22 100644 --- a/daemon/containerd/progress.go +++ b/daemon/containerd/progress.go @@ -54,6 +54,9 @@ func showProgress(ctx context.Context, ongoing *jobs, w io.Writer, updateFunc up return } case <-ctx.Done(): + // Don't use the context that is already done. + // Perform the last update with a new context. 
+ ctx := context.Background() updateFunc(ctx, ongoing, out, start) return } From cfaba5b39a18bd879c4bdf5943c70f937a2a8ce6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Thu, 1 Sep 2022 13:06:17 +0200 Subject: [PATCH 75/90] c8d/push: Fetch missing resources that can't be mounted MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/image_pull.go | 4 +- daemon/containerd/image_push.go | 251 ++------------- daemon/containerd/lazy_push.go | 532 ++++++++++++++++++++++++++++++++ daemon/containerd/progress.go | 21 +- 4 files changed, 571 insertions(+), 237 deletions(-) create mode 100644 daemon/containerd/lazy_push.go diff --git a/daemon/containerd/image_pull.go b/daemon/containerd/image_pull.go index f78da440c8c3b..b6d72ab0b3f36 100644 --- a/daemon/containerd/image_pull.go +++ b/daemon/containerd/image_pull.go @@ -14,6 +14,7 @@ import ( "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/streamformatter" "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -58,7 +59,8 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, opts = append(opts, containerd.WithImageHandler(h)) opts = i.applySnapshotterOpts(opts, ref) - finishProgress := showProgress(ctx, jobs, outStream, pullProgress(i.client.ContentStore())) + out := streamformatter.NewJSONProgressOutput(outStream, false) + finishProgress := showProgress(ctx, jobs, out, pullProgress(i.client.ContentStore(), true)) defer finishProgress() _, err = i.client.Pull(ctx, ref.String(), opts...) 
diff --git a/daemon/containerd/image_push.go b/daemon/containerd/image_push.go index 24e19a1898dd6..b83a43f0c6724 100644 --- a/daemon/containerd/image_push.go +++ b/daemon/containerd/image_push.go @@ -2,21 +2,16 @@ package containerd import ( "context" - "encoding/json" "io" - "strings" - "github.com/containerd/containerd/content" - cerrdefs "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/images" containerdimages "github.com/containerd/containerd/images" "github.com/containerd/containerd/images/converter" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/remotes" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types/registry" - "github.com/docker/docker/errdefs" - "github.com/opencontainers/go-digest" + "github.com/docker/docker/pkg/progress" + "github.com/docker/docker/pkg/streamformatter" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -24,7 +19,7 @@ import ( ) // PushImage initiates a push operation on the repository named localName. -func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) error { +func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *registry.AuthConfig, outStream io.Writer) (outerr error) { // TODO: Pass this from user? platformMatcher := platforms.All @@ -50,8 +45,8 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea target := img.Target - // Create a temporary image which is stripped from content that references other platforms. - // We or the remote may not have them and referencing them will end with an error. + // If user requested specific platforms to push, then create a manifest + // list with only the matching platforms. 
if platformMatcher != platforms.All { tmpRef := ref.String() + "-tmp-platformspecific" platformImg, err := converter.Convert(ctx, i.client, tmpRef, ref.String(), converter.WithPlatform(platformMatcher)) @@ -65,10 +60,13 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea jobs := newJobs() + resolver, tracker := newResolverFromAuthConfig(authConfig) + imageHandler := containerdimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { logrus.WithField("digest", desc.Digest.String()). WithField("mediaType", desc.MediaType). Debug("Pushing") + if desc.MediaType != containerdimages.MediaTypeDockerSchema1Manifest { children, err := containerdimages.Children(ctx, store, desc) if err != nil { @@ -85,230 +83,23 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea }) imageHandler = remotes.SkipNonDistributableBlobs(imageHandler) - resolver, tracker := newResolverFromAuthConfig(authConfig) - - finishProgress := showProgress(ctx, jobs, outStream, pushProgress(tracker)) - defer finishProgress() - - return lazyPush(ctx, store, ref.String(), target, resolver, imageHandler) -} - -// lazyPush uploads the provided content to a remote resource. It also attempts to -// handle push of content, which is not present locally in the store. 
-func lazyPush(ctx context.Context, store content.Store, ref string, desc ocispec.Descriptor, resolver remotes.Resolver, imagesHandler containerdimages.HandlerFunc) error { - // Annotate ref with digest to push only push tag for single digest - if !strings.Contains(ref, "@") { - ref = ref + "@" + desc.Digest.String() - } - - pusher, err := resolver.Pusher(ctx, ref) - if err != nil { - return err - } - - wrapper := func(h images.Handler) images.Handler { - return images.Handlers(imagesHandler, h) - } - - sources, err := collectSources(ctx, desc, store) - if err != nil { - return err - } - - lazyStore := newLazyContentStore(store, sources) - - var limiter *semaphore.Weighted - return remotes.PushContent(ctx, pusher, desc, lazyStore, limiter, platforms.All, wrapper) -} - -func findLazyChildren(ctx context.Context, desc ocispec.Descriptor, store content.Store) ([]ocispec.Descriptor, error) { - // Collect to hashset to remove duplicates - set := map[string]ocispec.Descriptor{} - - // Do a breadth-first search starting from this descriptor - queue := []ocispec.Descriptor{desc} - for len(queue) > 0 { - child := queue[0] - queue = queue[1:] - - if containerdimages.IsNonDistributable(child.MediaType) { - continue - } - - _, err := store.ReaderAt(ctx, child) - if err != nil { - if cerrdefs.IsNotFound(err) { - set[child.Digest.String()] = child - continue - } - return nil, err + out := streamformatter.NewJSONProgressOutput(outStream, false) + finishProgress := showProgress(ctx, jobs, out, combineProgress(pushProgress(tracker), pullProgress(store, false))) + defer func() { + finishProgress() + if outerr == nil { + progress.Messagef(out, "", "%s: digest: %s, size: %d", tag, target.Digest.String(), target.Size) } + }() - newChildren, err := containerdimages.Children(ctx, store, child) - if err != nil { - return nil, err - } - - if len(newChildren) > 0 { - queue = append(queue, newChildren...) 
- } - } + var limiter *semaphore.Weighted = nil // TODO: Respect max concurrent downloads/uploads + pusher := newLazyPusher(store, resolver, jobs, limiter, limiter) - result := make([]ocispec.Descriptor, 0, len(set)) - for _, desc := range set { - result = append(result, desc) - logrus.WithField("digest", desc.Digest.String()). - WithField("mediaType", desc.MediaType). - Debug("lazy children found") - } - - return result, nil -} - -// peekNotJson does a small peek of the content to check if content is definitely not JSON. -// It returns true if content is definitely not JSON, or false if it was unable to detect if it's -// JSON or not. -func peekNotJson(ctx context.Context, store content.Store, desc ocispec.Descriptor) (bool, error) { - readerAt, err := store.ReaderAt(ctx, desc) + leasedCtx, release, err := i.client.WithLease(ctx) if err != nil { - logrus.WithError(err).WithField("digest", desc.Digest).Debug("failed to create reader to peek for json") - return false, err - } - - buffer := []byte{0} - n, err := readerAt.ReadAt(buffer, 0) - if n != 1 || err != nil { - logrus.WithError(err).WithField("digest", desc.Digest).Debug("failed to peek json") - return false, err - } - - // It doesn't start with {, then it's not a json. - return rune(buffer[0]) != '{', nil -} - -func collectSources(ctx context.Context, desc ocispec.Descriptor, store content.Store) (map[digest.Digest]distributionSource, error) { - lazyChildren, err := findLazyChildren(ctx, desc, store) - if err != nil { - logrus.WithField("digest", desc.Digest.String()). - WithField("mediaType", desc.MediaType). 
- WithError(err).Error("failed to find lazy children referenced by descriptor") - return nil, err - } - - sources := map[digest.Digest]distributionSource{} - - success := errors.New("success, found the source but can't return earlier without an error") - err = store.Walk(ctx, func(i content.Info) error { - source := extractDistributionSource(i.Labels) - - // Nah, we're looking for a parent of this lazy child. - // This one will not provide us with the source. - if source.value == "" { - return nil - } - - desc := ocispec.Descriptor{Digest: i.Digest} - - // Do a simple peek of the content to avoid big blobs which definitely aren't json. - notJson, err := peekNotJson(ctx, store, desc) - if err != nil { - return err - } - if notJson { - logrus.WithField("digest", i.Digest).Debug("skipping, definitely not a json") - return nil - } - - // Read the manifest - blob, err := content.ReadBlob(ctx, store, desc) - if err != nil { - logrus.WithError(err).WithField("digest", i.Digest).Error("error reading blob") - return err - } - - // Manifests and indexes have different children. - // Index stores other manifests and manifests store layers. - // To avoid unmarshaling the blob separately as manifest and index - // this holds fields that contains them both and the media type. - var indexOrManifest struct { - MediaType string `json:"mediaType,omitempty"` - Manifests []ocispec.Descriptor `json:"manifests,omitempty"` - Layers []ocispec.Descriptor `json:"layers,omitempty"` - } - - err = json.Unmarshal(blob, &indexOrManifest) - if err != nil { - return nil - } - - mediaType := indexOrManifest.MediaType - // Just in case, check if it really is manifest or index. 
- if !containerdimages.IsManifestType(mediaType) && !containerdimages.IsIndexType(mediaType) { - return nil - } - if len(indexOrManifest.Layers) == 0 && len(indexOrManifest.Manifests) == 0 { - return nil - } - - // Look if this manifest/index specifies any of the lazy children - children := append(indexOrManifest.Layers, indexOrManifest.Manifests...) - for _, layer := range children { - for idx, wanted := range lazyChildren { - if layer.Digest == wanted.Digest { - // Found it! - sources[wanted.Digest] = source - - // Don't look for it anymore - if len(lazyChildren) > 1 { - lastIdx := len(lazyChildren) - 1 - lazyChildren[idx] = lazyChildren[lastIdx] - lazyChildren = lazyChildren[:lastIdx] - } else { - // We found all lazy children, let's end the walk. - lazyChildren = lazyChildren[:0] - return success - } - } - } - } - - return nil - }) - - if err == success { - err = nil - } - if len(lazyChildren) > 0 { - msg := "missing blobs with no source: " - for idx, c := range lazyChildren { - if idx != 0 { - msg += ", " - } - msg += c.Digest.String() - } - err = errdefs.NotFound(errors.New(msg)) - } - - return sources, err -} - -func extractDistributionSource(labels map[string]string) distributionSource { - var source distributionSource - - // Check if this blob has a distributionSource label - // if yes, read it as source - for k, v := range labels { - if strings.HasPrefix(k, "containerd.io/distribution.source.") { - source.key = k - source.value = v - break - } + return err } + defer release(leasedCtx) - return source -} - -type distributionSource struct { - key string - value string + return pusher.push(leasedCtx, ref, target, imageHandler) } diff --git a/daemon/containerd/lazy_push.go b/daemon/containerd/lazy_push.go new file mode 100644 index 0000000000000..a8b083799a6a5 --- /dev/null +++ b/daemon/containerd/lazy_push.go @@ -0,0 +1,532 @@ +package containerd + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "sync" + + 
"github.com/containerd/containerd/content" + cerrdefs "github.com/containerd/containerd/errdefs" + containerdimages "github.com/containerd/containerd/images" + "github.com/containerd/containerd/platforms" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker" + "github.com/docker/distribution/reference" + "github.com/docker/docker/errdefs" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +type lazyPusher struct { + store content.Store + resolver remotes.Resolver + jobs *jobs + downloadLimiter *semaphore.Weighted + uploadLimiter *semaphore.Weighted +} + +func newLazyPusher(store content.Store, resolver remotes.Resolver, jobs *jobs, + downloadLimiter, uploadLimiter *semaphore.Weighted) *lazyPusher { + return &lazyPusher{ + store: store, + resolver: resolver, + jobs: jobs, + downloadLimiter: downloadLimiter, + uploadLimiter: uploadLimiter, + } +} + +const labelDistributionSource = "containerd.io/distribution.source." + +// push uploads the provided content to a remote resource. If not all +// required content is present in the local content store, then it's fetched +// from the source repository or mounted on with cross-repo mounts. 
+func (p *lazyPusher) push(ctx context.Context, ref reference.Named, root ocispec.Descriptor, imagesHandler containerdimages.HandlerFunc) error { + + refDigest := ref.String() + + // Annotate ref with digest to push only push tag for single digest + if !strings.Contains(refDigest, "@") { + refDigest = refDigest + "@" + root.Digest.String() + } + + pusher, err := p.resolver.Pusher(ctx, refDigest) + if err != nil { + return err + } + + sources, err := p.fetchMissingContent(ctx, ref, root) + if err != nil { + return err + } + + lazyStore := newLazyContentStore(p.store, sources) + wrapper := func(h containerdimages.Handler) containerdimages.Handler { + return containerdimages.Handlers(imagesHandler, h) + } + + return remotes.PushContent(ctx, pusher, root, lazyStore, p.uploadLimiter, platforms.All, wrapper) +} + +func (p *lazyPusher) fetchMissingContent(ctx context.Context, ref reference.Named, root ocispec.Descriptor) (map[digest.Digest]distributionSource, error) { + sources := map[digest.Digest]distributionSource{} + missing := []ocispec.Descriptor{} + next := []ocispec.Descriptor{root} + + for len(next) > 0 { + newMissing, err := findMissingContent(ctx, p.store, next...) + if err != nil { + return sources, err + } + missing = dedupDescriptors(append(missing, newMissing...)) + + logrus.WithField("missing", missing).Debug("searching sources for missing descriptors") + err = collectSources(ctx, missing, p.store, sources) + if err != nil { + return sources, err + } + + // Create a slice of descriptors that can be fetched now. + toFetch := []ocispec.Descriptor{} + for _, desc := range missing { + source, hasSource := sources[desc.Digest] + if hasSource && shouldDownload(ref, source, desc) { + toFetch = append(toFetch, desc) + p.jobs.Add(desc) + } + } + + fetched, err := p.fetch(ctx, sources, toFetch) + logrus.WithError(err). + WithField("fetched", fetched). + WithField("toFetch", toFetch). 
+ Debug("fetch") + + if err != nil { + return sources, err + } + + if len(toFetch) > 0 && len(fetched) == 0 { + logrus.WithField("toFetch", toFetch).Error("failed to fetch any of the missing blobs") + return sources, err + } + + isFetched := func(desc ocispec.Descriptor) bool { + for _, f := range fetched { + if f.Digest == desc.Digest { + return true + } + } + return false + } + + // Remove fetched content from missing + missingMinusFetched := []ocispec.Descriptor{} + for _, m := range missing { + if !isFetched(m) { + missingMinusFetched = append(missingMinusFetched, m) + } + } + missing = missingMinusFetched + + next = fetched + } + + return sources, nil +} + +func dedupDescriptors(s []ocispec.Descriptor) []ocispec.Descriptor { + m := map[digest.Digest]ocispec.Descriptor{} + for _, d := range s { + m[d.Digest] = d + } + + out := []ocispec.Descriptor{} + for _, v := range m { + out = append(out, v) + } + + return out +} + +func shouldDownload(root reference.Named, source distributionSource, desc ocispec.Descriptor) bool { + mediaType := desc.MediaType + + // Manifests/indexes/configs cannot be cross-repo mounted so we have to download them + if containerdimages.IsManifestType(mediaType) { + return true + } + if containerdimages.IsIndexType(mediaType) { + return true + } + if containerdimages.IsConfigType(mediaType) { + return true + } + + registry := reference.Domain(root) + + // Cross-repo mount doesn't seem to work with insecure registries. + // Maybe it's only Docker Hub? + // TODO(vvoland): do the actual check when we support insecure registries + isInsecure := false + if isInsecure { + return true + } + + // If the source registry is the same as the one we are pushing to + // then the cross-repo mount will work, and we don't need to download. 
+ return registry != source.Registry() +} + +func (p *lazyPusher) fetch(ctx context.Context, sources map[digest.Digest]distributionSource, missing []ocispec.Descriptor) ([]ocispec.Descriptor, error) { + fetched := []ocispec.Descriptor{} + mutex := sync.Mutex{} + eg, ctx := errgroup.WithContext(ctx) + + for _, desc := range missing { + log := logrus. + WithField("digest", desc.Digest.String()). + WithField("mediaType", desc.MediaType) + + source, ok := sources[desc.Digest] + if !ok { + log.Debug("no source") + continue + } + + desc := desc + eg.Go(func() error { + if p.downloadLimiter != nil { + if err := p.downloadLimiter.Acquire(ctx, 1); err != nil { + return err + } + defer p.downloadLimiter.Release(1) + } + + ref, err := source.GetReference(desc.Digest) + if err != nil { + return err + } + log = log.WithField("ref", ref.String()) + + name, resolved, err := p.resolver.Resolve(ctx, ref.String()) + if err != nil { + // If the size is set, we can just fallback to the original descriptor. 
+ if desc.Size > 0 { + log.WithError(err).Debug("failed to resolve missing content, fallback to original") + name = ref.String() + resolved = desc + } else { + return err + } + } + + log.WithField("name", name).Debug("resolved missing content") + fetcher, err := p.resolver.Fetcher(ctx, name) + if err != nil { + log.WithError(err).Debug("failed to create fetcher") + return err + } + + appendDistributionSourceLabel, err := docker.AppendDistributionSourceLabel(p.store, ref.String()) + if err != nil { + return err + } + + fetchHandler := containerdimages.Handlers( + remotes.FetchHandler(p.store, fetcher), + appendDistributionSourceLabel, + appendLabelHandler(ctx, p.store, "docker.io/fetch.reason", "push"), + ) + + _, err = fetchHandler(ctx, resolved) + if err != nil { + log.WithError(err).Debug("failed to fetch") + return err + } + + log.Debug("fetched!") + mutex.Lock() + fetched = append(fetched, desc) + mutex.Unlock() + return nil + }) + } + + return fetched, eg.Wait() +} + +// appendLabelHandler returns a handler which adds a label with value to the handled content +func appendLabelHandler(ctx context.Context, manager content.Manager, key, value string) containerdimages.HandlerFunc { + return containerdimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + info, err := manager.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + if info.Labels == nil { + info.Labels = map[string]string{} + } + info.Labels[key] = value + + _, err = manager.Update(ctx, info, "labels."+key) + return nil, err + }) +} + +// contentDoesntExist returns true only if content is not present in store and +// there was no other error. 
+func contentDoesntExist(ctx context.Context, store content.Store, desc ocispec.Descriptor) (bool, error) { + // Don't use store.Info to make this also work with the lazyContentStore + // which doesn't return NotFound error + r, err := store.ReaderAt(ctx, desc) + if err == nil { + r.Close() + } else { + if cerrdefs.IsNotFound(err) { + return true, nil + } + } + + return false, err +} + +// findMissingContent traverses the children of the given descriptors and returns +// descriptors of contents that are not present in the content store. +func findMissingContent(ctx context.Context, store content.Store, desc ...ocispec.Descriptor) ([]ocispec.Descriptor, error) { + // Collect to hashset to remove duplicates + set := map[digest.Digest]ocispec.Descriptor{} + mutex := sync.Mutex{} + + err := containerdimages.Dispatch(ctx, + containerdimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + mt := desc.MediaType + + if containerdimages.IsNonDistributable(mt) { + return nil, containerdimages.ErrSkipDesc + } + + doesntExist, err := contentDoesntExist(ctx, store, desc) + if err != nil { + return nil, err + } + if doesntExist { + mutex.Lock() + defer mutex.Unlock() + set[desc.Digest] = desc + return nil, nil + } + + children, err := containerdimages.Children(ctx, store, desc) + return children, err + }), + nil, desc...) + + if err != nil { + return nil, err + } + + result := make([]ocispec.Descriptor, 0, len(set)) + for _, desc := range set { + result = append(result, desc) + logrus.WithField("digest", desc.Digest.String()). + WithField("mediaType", desc.MediaType). + Debug("missing content") + } + + return result, nil +} + +// peekNotJson does a small peek of the content to check if content is definitely not JSON. +// It returns true if content is definitely not JSON, or false if it was unable to detect if it's +// JSON or not. 
+func peekNotJson(ctx context.Context, store content.Provider, desc ocispec.Descriptor) (bool, error) { + readerAt, err := store.ReaderAt(ctx, desc) + if err != nil { + logrus.WithError(err).WithField("digest", desc.Digest).Debug("failed to create reader to peek for json") + return false, err + } + defer readerAt.Close() + + buffer := []byte{0} + n, err := readerAt.ReadAt(buffer, 0) + if n != 1 || err != nil { + logrus.WithError(err).WithField("digest", desc.Digest).Debug("failed to peek json") + return false, err + } + + // It doesn't start with {, then it's not a json. + return rune(buffer[0]) != '{', nil +} + +// collectSources walks the content store and looks for content which can +// provide a source registry and repository for the provided descriptors from +// the containerd.io/distribution.source labels +func collectSources(ctx context.Context, toCollect []ocispec.Descriptor, store content.Store, sources map[digest.Digest]distributionSource) error { + // Nothing to do. + if len(toCollect) == 0 { + return nil + } + + // Make a copy of the missing descriptors as we will be removing + // the descriptors that we found a source for. + missing := make([]ocispec.Descriptor, len(toCollect)) + copy(missing, toCollect) + + success := errors.New("success, found the source but can't return earlier without an error") + err := store.Walk(ctx, func(i content.Info) error { + source := extractDistributionSource(i.Labels) + log := logrus. + WithField("digest", i.Digest) + + log.Debug("walk") + + // Nah, we're looking for a parent of this lazy child. + // This one will not provide us with the source. + if source.value == "" { + return nil + } + + desc := ocispec.Descriptor{Digest: i.Digest} + + // Do a simple peek of the content to avoid big blobs which definitely aren't json. 
+ notJson, err := peekNotJson(ctx, store, desc) + if err != nil { + return err + } + if notJson { + log.Debug("skipping, definitely not a json") + return nil + } + + // Read the manifest + blob, err := content.ReadBlob(ctx, store, desc) + if err != nil { + log.WithError(err).Error("error reading blob") + return err + } + + // Manifests and indexes have different children. + // Index stores other manifests and manifests store layers. + // To avoid unmarshaling the blob separately as manifest and index + // this holds fields that contains them both and the media type. + var indexOrManifest struct { + MediaType string `json:"mediaType,omitempty"` + Manifests []ocispec.Descriptor `json:"manifests,omitempty"` + Layers []ocispec.Descriptor `json:"layers,omitempty"` + Config ocispec.Descriptor `json:"config,omitempty"` + } + + err = json.Unmarshal(blob, &indexOrManifest) + if err != nil { + log.WithError(err).Debug("unmarshal failed") + return nil + } + + mediaType := indexOrManifest.MediaType + // Just in case, check if it really is manifest or index. + if !containerdimages.IsManifestType(mediaType) && !containerdimages.IsIndexType(mediaType) { + log.Debug("not a manifest/index") + return nil + } + children := append(indexOrManifest.Layers, indexOrManifest.Manifests...) + if indexOrManifest.Config.Digest != digest.Digest("") { + children = append(children, indexOrManifest.Config) + } + + if len(children) == 0 { + log.Debug("empty a manifest/index") + return nil + } + + // Look if this manifest/index specifies any of the missing content + for _, layer := range children { + for idx := 0; idx < len(missing); idx += 1 { + wanted := missing[idx] + if layer.Digest == wanted.Digest { + // Found it! 
+ sources[wanted.Digest] = source + log.WithField("wanted", wanted.Digest.String()).Debug("found") + + // Don't look for it anymore + if len(missing) > 1 { + lastIdx := len(missing) - 1 + missing[idx] = missing[lastIdx] + missing = missing[:lastIdx] + idx -= 1 + } else { + // We found all missing, let's end the walk. + missing = missing[:0] + return success + } + } + } + } + + return nil + }) + + if err == success { + err = nil + } + if len(missing) > 0 { + msg := "missing blobs with no source: " + for idx, c := range missing { + if idx != 0 { + msg += ", " + } + msg += c.Digest.String() + } + err = errdefs.NotFound(errors.New(msg)) + } + + return err +} + +func extractDistributionSource(labels map[string]string) distributionSource { + var source distributionSource + + // Check if this blob has a distributionSource label + // if yes, read it as source + for k, v := range labels { + if strings.HasPrefix(k, labelDistributionSource) { + source.key = k + source.value = v + break + } + } + + return source +} + +type distributionSource struct { + key string + value string +} + +func (source distributionSource) Registry() string { + registry := strings.TrimPrefix(source.key, labelDistributionSource) + if registry == source.key { + return "" + } + return registry +} + +func (source distributionSource) GetReference(dgst digest.Digest) (reference.Named, error) { + registry := source.Registry() + if registry == "" { + return nil, fmt.Errorf("invalid distribution source label %s=%s", source.key, source.value) + } + + ref, err := reference.ParseNamed(registry + "/" + source.value) + if err != nil { + return nil, err + } + + return reference.WithDigest(ref, dgst) +} diff --git a/daemon/containerd/progress.go b/daemon/containerd/progress.go index 81e039b6fe7a6..e792127f55be2 100644 --- a/daemon/containerd/progress.go +++ b/daemon/containerd/progress.go @@ -2,7 +2,6 @@ package containerd import ( "context" - "io" "sync" "time" @@ -12,7 +11,6 @@ import ( 
"github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" "github.com/docker/docker/pkg/stringid" "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" @@ -21,12 +19,11 @@ import ( type updateProgressFunc func(ctx context.Context, ongoing *jobs, output progress.Output, start time.Time) error -func showProgress(ctx context.Context, ongoing *jobs, w io.Writer, updateFunc updateProgressFunc) func() { +func showProgress(ctx context.Context, ongoing *jobs, out progress.Output, updateFunc updateProgressFunc) func() { stop := make(chan struct{}) ctx, cancelProgress := context.WithCancel(ctx) var ( - out = streamformatter.NewJSONProgressOutput(w, false) ticker = time.NewTicker(100 * time.Millisecond) start = time.Now() ) @@ -66,6 +63,18 @@ func showProgress(ctx context.Context, ongoing *jobs, w io.Writer, updateFunc up } } +func combineProgress(fns ...updateProgressFunc) updateProgressFunc { + return func(ctx context.Context, ongoing *jobs, out progress.Output, start time.Time) error { + for _, f := range fns { + err := f(ctx, ongoing, out, start) + if err != nil { + return err + } + } + return nil + } +} + func pushProgress(tracker docker.StatusTracker) updateProgressFunc { return func(ctx context.Context, ongoing *jobs, out progress.Output, start time.Time) error { for _, j := range ongoing.Jobs() { @@ -103,7 +112,7 @@ func pushProgress(tracker docker.StatusTracker) updateProgressFunc { } } -func pullProgress(cs content.Store) updateProgressFunc { +func pullProgress(cs content.Store, showExists bool) updateProgressFunc { return func(ctx context.Context, ongoing *jobs, out progress.Output, start time.Time) error { pulling := map[string]content.Status{} actives, err := cs.ListStatuses(ctx, "") @@ -141,7 +150,7 @@ func pullProgress(cs content.Store) updateProgressFunc { LastUpdate: true, }) ongoing.Remove(j) - 
} else { + } else if showExists { out.WriteProgress(progress.Progress{ ID: stringid.TruncateID(j.Digest.Encoded()), Action: "Exists", From 6c6f1af54bc0d1a842cccc3f855267099d36a6d5 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Wed, 7 Sep 2022 11:26:30 +0200 Subject: [PATCH 76/90] Preserve Labels during docker tag Signed-off-by: Nicolas De Loof --- daemon/containerd/image_tag.go | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/daemon/containerd/image_tag.go b/daemon/containerd/image_tag.go index b0d01347136fe..5bcf46a3a8576 100644 --- a/daemon/containerd/image_tag.go +++ b/daemon/containerd/image_tag.go @@ -35,14 +35,15 @@ func (i *ImageService) TagImage(ctx context.Context, imageName, repository, tag func (i *ImageService) TagImageWithReference(ctx context.Context, imageID image.ID, newTag reference.Named) error { logrus.Infof("Tagging image %q with reference %q", imageID, newTag.String()) - desc, err := i.ResolveImage(ctx, imageID.String()) + ci, _, err := i.getImage(ctx, imageID.String()) if err != nil { return err } img := containerdimages.Image{ Name: newTag.String(), - Target: desc, + Target: ci.Target(), + Labels: ci.Labels(), } is := i.client.ImageService() From c6836be3636d775bac49bb4a58f2a706f402df56 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Thu, 8 Sep 2022 16:10:46 +0200 Subject: [PATCH 77/90] Set the user on exec --- daemon/exec.go | 2 +- daemon/exec_linux.go | 49 +++++++++++++++++++++++++++++++++++---- daemon/exec_linux_test.go | 3 ++- daemon/exec_windows.go | 4 +++- 4 files changed, 50 insertions(+), 8 deletions(-) diff --git a/daemon/exec.go b/daemon/exec.go index 08687760a720b..9a52d04eddae9 100644 --- a/daemon/exec.go +++ b/daemon/exec.go @@ -256,7 +256,7 @@ func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, optio p.Cwd = "/" } - if err := daemon.execSetPlatformOpt(c, ec, p); err != nil { + if err := daemon.execSetPlatformOpt(ctx, c, ec, p); err != nil { return err } diff --git 
a/daemon/exec_linux.go b/daemon/exec_linux.go index d0090d60973dc..f51b105ecedb3 100644 --- a/daemon/exec_linux.go +++ b/daemon/exec_linux.go @@ -3,6 +3,9 @@ package daemon // import "github.com/docker/docker/daemon" import ( "context" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/oci" + coci "github.com/containerd/containerd/oci" "github.com/containerd/containerd/pkg/apparmor" "github.com/docker/docker/container" "github.com/docker/docker/daemon/exec" @@ -10,12 +13,48 @@ import ( specs "github.com/opencontainers/runtime-spec/specs-go" ) -func (daemon *Daemon) execSetPlatformOpt(c *container.Container, ec *exec.Config, p *specs.Process) error { +// withResetAdditionalGIDs resets additional GIDs +// This code is based on nerdctl, under Apache License +// https://github.com/containerd/nerdctl/blob/2bbd998a1c95e6682120918d9a07a24ccef4f5fb/cmd/nerdctl/run_user.go#L69 +func withResetAdditionalGIDs() oci.SpecOpts { + return func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error { + s.Process.User.AdditionalGids = nil + return nil + } +} + +func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, c *container.Container, ec *exec.Config, p *specs.Process) error { if len(ec.User) > 0 { - var err error - p.User, err = getUser(c, ec.User) - if err != nil { - return err + if daemon.UsesSnapshotter() { + cc, err := daemon.containerdCli.LoadContainer(ctx, c.ID) + if err != nil { + return err + } + ci, err := cc.Info(ctx) + if err != nil { + return err + } + spec, err := cc.Spec(ctx) + if err != nil { + return err + } + opts := []oci.SpecOpts{ + coci.WithUser(ec.User), + withResetAdditionalGIDs(), + coci.WithAdditionalGIDs(ec.User), + } + for _, opt := range opts { + if err := opt(ctx, daemon.containerdCli, &ci, spec); err != nil { + return err + } + } + p.User = spec.Process.User + } else { + var err error + p.User, err = getUser(c, ec.User) + if err != nil { + return err + } } } if ec.Privileged { diff --git 
a/daemon/exec_linux_test.go b/daemon/exec_linux_test.go index ffef343898e79..36eb59b7d5326 100644 --- a/daemon/exec_linux_test.go +++ b/daemon/exec_linux_test.go @@ -4,6 +4,7 @@ package daemon import ( + "context" "testing" "github.com/containerd/containerd/pkg/apparmor" @@ -82,7 +83,7 @@ func TestExecSetPlatformOptAppArmor(t *testing.T) { ec := &exec.Config{Privileged: execPrivileged} p := &specs.Process{} - err := d.execSetPlatformOpt(c, ec, p) + err := d.execSetPlatformOpt(context.TODO(), c, ec, p) assert.NilError(t, err) assert.Equal(t, p.ApparmorProfile, tc.expectedProfile) }) diff --git a/daemon/exec_windows.go b/daemon/exec_windows.go index 32f16e9282c2e..9c6b7ce623518 100644 --- a/daemon/exec_windows.go +++ b/daemon/exec_windows.go @@ -1,12 +1,14 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" + "github.com/docker/docker/container" "github.com/docker/docker/daemon/exec" specs "github.com/opencontainers/runtime-spec/specs-go" ) -func (daemon *Daemon) execSetPlatformOpt(c *container.Container, ec *exec.Config, p *specs.Process) error { +func (daemon *Daemon) execSetPlatformOpt(_ context.Context, c *container.Container, ec *exec.Config, p *specs.Process) error { if c.OS == "windows" { p.User.Username = ec.User } From a8b5886f2bc1ddfbb228c74d4d6699e95c221549 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Mon, 12 Sep 2022 10:40:45 +0200 Subject: [PATCH 78/90] c8d/resolver: Use hosts from daemon configuration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski Signed-off-by: Nicolas De Loof --- daemon/containerd/image_pull.go | 2 +- daemon/containerd/image_push.go | 2 +- daemon/containerd/resolver.go | 64 +++++++++++++++++++++------------ daemon/containerd/service.go | 25 ++++++++----- daemon/daemon.go | 2 +- 5 files changed, 61 insertions(+), 34 deletions(-) diff --git a/daemon/containerd/image_pull.go b/daemon/containerd/image_pull.go index 
b6d72ab0b3f36..d4661314eabfe 100644 --- a/daemon/containerd/image_pull.go +++ b/daemon/containerd/image_pull.go @@ -46,7 +46,7 @@ func (i *ImageService) PullImage(ctx context.Context, image, tagOrDigest string, } } - resolver, _ := newResolverFromAuthConfig(authConfig) + resolver, _ := i.newResolverFromAuthConfig(authConfig) opts = append(opts, containerd.WithResolver(resolver)) jobs := newJobs() diff --git a/daemon/containerd/image_push.go b/daemon/containerd/image_push.go index b83a43f0c6724..29d6f9758ff55 100644 --- a/daemon/containerd/image_push.go +++ b/daemon/containerd/image_push.go @@ -60,7 +60,7 @@ func (i *ImageService) PushImage(ctx context.Context, image, tag string, metaHea jobs := newJobs() - resolver, tracker := newResolverFromAuthConfig(authConfig) + resolver, tracker := i.newResolverFromAuthConfig(authConfig) imageHandler := containerdimages.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { logrus.WithField("digest", desc.Digest.String()). 
diff --git a/daemon/containerd/resolver.go b/daemon/containerd/resolver.go index 3a16ff2615868..e01001437a439 100644 --- a/daemon/containerd/resolver.go +++ b/daemon/containerd/resolver.go @@ -8,32 +8,52 @@ import ( "github.com/sirupsen/logrus" ) -func newResolverFromAuthConfig(authConfig *registrytypes.AuthConfig) (remotes.Resolver, docker.StatusTracker) { - opts := []docker.RegistryOpt{} - - if authConfig != nil { - cfgHost := registry.ConvertToHostname(authConfig.ServerAddress) - if cfgHost == registry.IndexHostname { - cfgHost = registry.DefaultRegistryHost - } - authorizer := docker.NewDockerAuthorizer(docker.WithAuthCreds(func(host string) (string, string, error) { - if cfgHost != host { - logrus.WithField("host", host).WithField("cfgHost", cfgHost).Warn("Host doesn't match") - return "", "", nil - } - if authConfig.IdentityToken != "" { - return "", authConfig.IdentityToken, nil - } - return authConfig.Username, authConfig.Password, nil - })) - - opts = append(opts, docker.WithAuthorizer(authorizer)) - } +func (i *ImageService) newResolverFromAuthConfig(authConfig *registrytypes.AuthConfig) (remotes.Resolver, docker.StatusTracker) { + hostsFn := i.registryHosts.RegistryHosts() + hosts := hostsAuthorizerWrapper(hostsFn, authConfig) tracker := docker.NewInMemoryTracker() return docker.NewResolver(docker.ResolverOptions{ - Hosts: docker.ConfigureDefaultRegistries(opts...), + Hosts: hosts, Tracker: tracker, }), tracker } + +func hostsAuthorizerWrapper(hostsFn docker.RegistryHosts, authConfig *registrytypes.AuthConfig) docker.RegistryHosts { + return docker.RegistryHosts(func(n string) ([]docker.RegistryHost, error) { + hosts, err := hostsFn(n) + if err == nil { + for idx, host := range hosts { + if host.Authorizer == nil { + var opts []docker.AuthorizerOpt + if authConfig != nil { + opts = append(opts, authorizationCredsFromAuthConfig(*authConfig)) + } + host.Authorizer = docker.NewDockerAuthorizer(opts...) 
+ hosts[idx] = host + } + } + } + + return hosts, err + }) +} + +func authorizationCredsFromAuthConfig(authConfig registrytypes.AuthConfig) docker.AuthorizerOpt { + cfgHost := registry.ConvertToHostname(authConfig.ServerAddress) + if cfgHost == registry.IndexHostname { + cfgHost = registry.DefaultRegistryHost + } + + return docker.WithAuthCreds(func(host string) (string, string, error) { + if cfgHost != host { + logrus.WithField("host", host).WithField("cfgHost", cfgHost).Warn("Host doesn't match") + return "", "", nil + } + if authConfig.IdentityToken != "" { + return "", authConfig.IdentityToken, nil + } + return authConfig.Username, authConfig.Password, nil + }) +} diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index 84bf86866d4f3..a1ef796ab9d4b 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -8,6 +8,7 @@ import ( "github.com/containerd/containerd" cerrdefs "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/plugin" + "github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/snapshots" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" @@ -22,19 +23,25 @@ import ( // ImageService implements daemon.ImageService type ImageService struct { - client *containerd.Client - usage singleflight.Group - containers container.Store - snapshotter string - pruneRunning int32 + client *containerd.Client + usage singleflight.Group + containers container.Store + snapshotter string + pruneRunning int32 + registryHosts RegistryHostsProvider +} + +type RegistryHostsProvider interface { + RegistryHosts() docker.RegistryHosts } // NewService creates a new ImageService. 
-func NewService(c *containerd.Client, containers container.Store, snapshotter string) *ImageService { +func NewService(c *containerd.Client, containers container.Store, snapshotter string, hostsProvider RegistryHostsProvider) *ImageService { return &ImageService{ - client: c, - containers: containers, - snapshotter: snapshotter, + client: c, + containers: containers, + snapshotter: snapshotter, + registryHosts: hostsProvider, } } diff --git a/daemon/daemon.go b/daemon/daemon.go index 4d0150d432031..dc8d14dc5e0e8 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -1014,7 +1014,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S if err := configureKernelSecuritySupport(config, driverName); err != nil { return nil, err } - d.imageService = ctrd.NewService(d.containerdCli, d.containers, driverName) + d.imageService = ctrd.NewService(d.containerdCli, d.containers, driverName, d) } else { layerStore, err := layer.NewStoreFromOptions(layer.StoreOptions{ Root: config.Root, From 36dab4769b946d01934d0dfefdd40441a5092958 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pawe=C5=82=20Gronowski?= Date: Thu, 1 Sep 2022 17:03:10 +0200 Subject: [PATCH 79/90] c8d/resolver: Fallback to http for insecure registries MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Paweł Gronowski --- daemon/containerd/resolver.go | 58 ++++++++++++++++++++++++++--------- daemon/containerd/service.go | 25 ++++++++------- daemon/daemon.go | 10 +++++- registry/service.go | 6 ++++ 4 files changed, 73 insertions(+), 26 deletions(-) diff --git a/daemon/containerd/resolver.go b/daemon/containerd/resolver.go index e01001437a439..a4ffade25c551 100644 --- a/daemon/containerd/resolver.go +++ b/daemon/containerd/resolver.go @@ -1,6 +1,9 @@ package containerd import ( + "net/http" + "strings" + "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" registrytypes 
"github.com/docker/docker/api/types/registry" @@ -9,10 +12,10 @@ import ( ) func (i *ImageService) newResolverFromAuthConfig(authConfig *registrytypes.AuthConfig) (remotes.Resolver, docker.StatusTracker) { + tracker := docker.NewInMemoryTracker() hostsFn := i.registryHosts.RegistryHosts() - hosts := hostsAuthorizerWrapper(hostsFn, authConfig) - tracker := docker.NewInMemoryTracker() + hosts := hostsWrapper(hostsFn, authConfig, i.registryService) return docker.NewResolver(docker.ResolverOptions{ Hosts: hosts, @@ -20,23 +23,29 @@ func (i *ImageService) newResolverFromAuthConfig(authConfig *registrytypes.AuthC }), tracker } -func hostsAuthorizerWrapper(hostsFn docker.RegistryHosts, authConfig *registrytypes.AuthConfig) docker.RegistryHosts { +func hostsWrapper(hostsFn docker.RegistryHosts, authConfig *registrytypes.AuthConfig, regService *registry.Service) docker.RegistryHosts { return docker.RegistryHosts(func(n string) ([]docker.RegistryHost, error) { hosts, err := hostsFn(n) - if err == nil { - for idx, host := range hosts { - if host.Authorizer == nil { - var opts []docker.AuthorizerOpt - if authConfig != nil { - opts = append(opts, authorizationCredsFromAuthConfig(*authConfig)) - } - host.Authorizer = docker.NewDockerAuthorizer(opts...) - hosts[idx] = host + if err != nil { + return nil, err + } + + for idx, host := range hosts { + if host.Authorizer == nil { + var opts []docker.AuthorizerOpt + if authConfig != nil { + opts = append(opts, authorizationCredsFromAuthConfig(*authConfig)) } + host.Authorizer = docker.NewDockerAuthorizer(opts...) 
+ + isInsecure := regService.IsInsecureRegistry(host.Host) + if host.Client.Transport != nil && isInsecure { + host.Client.Transport = httpFallback{super: host.Client.Transport} + } + hosts[idx] = host } } - - return hosts, err + return hosts, nil }) } @@ -57,3 +66,24 @@ func authorizationCredsFromAuthConfig(authConfig registrytypes.AuthConfig) docke return authConfig.Username, authConfig.Password, nil }) } + +type httpFallback struct { + super http.RoundTripper +} + +func (f httpFallback) RoundTrip(r *http.Request) (*http.Response, error) { + resp, err := f.super.RoundTrip(r) + if err != nil { + if strings.Contains(err.Error(), "http: server gave HTTP response to HTTPS client") { + plainHttpUrl := *r.URL + plainHttpUrl.Scheme = "http" + + plainHttpRequest := *r + plainHttpRequest.URL = &plainHttpUrl + + return http.DefaultTransport.RoundTrip(&plainHttpRequest) + } + } + + return resp, err +} diff --git a/daemon/containerd/service.go b/daemon/containerd/service.go index a1ef796ab9d4b..e942f48b1a43b 100644 --- a/daemon/containerd/service.go +++ b/daemon/containerd/service.go @@ -16,6 +16,7 @@ import ( "github.com/docker/docker/daemon/images" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/docker/docker/registry" "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/identity" "golang.org/x/sync/singleflight" @@ -23,12 +24,13 @@ import ( // ImageService implements daemon.ImageService type ImageService struct { - client *containerd.Client - usage singleflight.Group - containers container.Store - snapshotter string - pruneRunning int32 - registryHosts RegistryHostsProvider + client *containerd.Client + usage singleflight.Group + containers container.Store + snapshotter string + pruneRunning int32 + registryHosts RegistryHostsProvider + registryService *registry.Service } type RegistryHostsProvider interface { @@ -36,12 +38,13 @@ type RegistryHostsProvider interface { } // NewService creates a new ImageService. 
-func NewService(c *containerd.Client, containers container.Store, snapshotter string, hostsProvider RegistryHostsProvider) *ImageService { +func NewService(c *containerd.Client, containers container.Store, snapshotter string, hostsProvider RegistryHostsProvider, registry *registry.Service) *ImageService { return &ImageService{ - client: c, - containers: containers, - snapshotter: snapshotter, - registryHosts: hostsProvider, + client: c, + containers: containers, + snapshotter: snapshotter, + registryHosts: hostsProvider, + registryService: registry, } } diff --git a/daemon/daemon.go b/daemon/daemon.go index dc8d14dc5e0e8..d518f0463b462 100644 --- a/daemon/daemon.go +++ b/daemon/daemon.go @@ -15,6 +15,7 @@ import ( "path" "path/filepath" "runtime" + "strings" "sync" "time" @@ -173,6 +174,13 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts { for _, v := range daemon.configStore.InsecureRegistries { u, err := url.Parse(v) + if err != nil && !strings.HasPrefix(v, "http://") && !strings.HasPrefix(v, "https://") { + originalErr := err + u, err = url.Parse("http://" + v) + if err != nil { + err = originalErr + } + } c := resolverconfig.RegistryConfig{} if err == nil { v = u.Host @@ -1014,7 +1022,7 @@ func NewDaemon(ctx context.Context, config *config.Config, pluginStore *plugin.S if err := configureKernelSecuritySupport(config, driverName); err != nil { return nil, err } - d.imageService = ctrd.NewService(d.containerdCli, d.containers, driverName, d) + d.imageService = ctrd.NewService(d.containerdCli, d.containers, driverName, d, d.registryService) } else { layerStore, err := layer.NewStoreFromOptions(layer.StoreOptions{ Root: config.Root, diff --git a/registry/service.go b/registry/service.go index 0415a48a7704c..b3fe7db6ffb52 100644 --- a/registry/service.go +++ b/registry/service.go @@ -146,3 +146,9 @@ func (s *Service) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, } return endpoints, err } + +// IsInsecureRegistry returns true if the 
registry at given host is configured as +// insecure registry. +func (s *Service) IsInsecureRegistry(host string) bool { + return !s.config.isSecureIndex(host) +} From 2dc83288871cc6009001a85d618061854ff6e88c Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Mon, 12 Sep 2022 10:48:52 +0200 Subject: [PATCH 80/90] Refactor resolving/getting images There were a lot of useless duplicated code Signed-off-by: Djordje Lukic --- daemon/containerd/image.go | 112 +++++---------------------------- daemon/containerd/image_tag.go | 4 +- 2 files changed, 19 insertions(+), 97 deletions(-) diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index 170bbb01bbd55..aecb48bac7c16 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -26,8 +26,8 @@ var shortID = regexp.MustCompile(`^([a-f0-9]{4,64})$`) // GetContainerdImage returns the containerd image corresponding to the image referred to by refOrID. // The platform parameter is currently ignored -func (i *ImageService) GetContainerdImage(ctx context.Context, refOrID string, platform *ocispec.Platform) (containerdimages.Image, error) { - return i.resolveImageName2(ctx, refOrID) +func (i *ImageService) GetContainerdImage(ctx context.Context, refOrID string, platform *ocispec.Platform) (img containerdimages.Image, err error) { + return i.resolveImage(ctx, refOrID) } // GetImage returns an image corresponding to the image referred to by refOrID. 
@@ -68,12 +68,13 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima } func (i *ImageService) getImage(ctx context.Context, refOrID string) (containerd.Image, *image.Image, error) { - desc, err := i.ResolveImage(ctx, refOrID) + img, err := i.resolveImage(ctx, refOrID) if err != nil { return nil, nil, err } - ctrdimg, err := i.resolveImageName2(ctx, refOrID) + // TODO(rumpl): pass the platform + ctrdimg, err := i.GetContainerdImage(ctx, refOrID, nil) if err != nil { return nil, nil, err } @@ -108,7 +109,7 @@ func (i *ImageService) getImage(ctx context.Context, refOrID string) (containerd } return ii, &image.Image{ V1Image: image.V1Image{ - ID: string(desc.Digest), + ID: string(img.Target.Digest), OS: ociimage.OS, Architecture: ociimage.Architecture, Config: &containertypes.Config{ @@ -125,18 +126,13 @@ func (i *ImageService) getImage(ctx context.Context, refOrID string) (containerd }, nil } -// ResolveImage searches for an image based on the given +// resolveImage searches for an image based on the given // reference or identifier. Returns the descriptor of // the image, could be manifest list, manifest, or config. 
-func (i *ImageService) ResolveImage(ctx context.Context, refOrID string) (d ocispec.Descriptor, err error) { - d, _, err = i.resolveImageName(ctx, refOrID) - return -} - -func (i *ImageService) resolveImageName2(ctx context.Context, refOrID string) (img containerdimages.Image, err error) { +func (i *ImageService) resolveImage(ctx context.Context, refOrID string) (img containerdimages.Image, err error) { parsed, err := reference.ParseAnyReference(refOrID) if err != nil { - return img, errdefs.InvalidParameter(err) + return containerdimages.Image{}, errdefs.InvalidParameter(err) } is := i.client.ImageService() @@ -145,15 +141,15 @@ func (i *ImageService) resolveImageName2(ctx context.Context, refOrID string) (i if !ok { digested, ok := parsed.(reference.Digested) if !ok { - return img, errdefs.InvalidParameter(errors.New("bad reference")) + return containerdimages.Image{}, errdefs.InvalidParameter(errors.New("bad reference")) } imgs, err := is.List(ctx, fmt.Sprintf("target.digest==%s", digested.Digest())) if err != nil { - return img, errors.Wrap(err, "failed to lookup digest") + return containerdimages.Image{}, errors.Wrap(err, "failed to lookup digest") } if len(imgs) == 0 { - return img, errdefs.NotFound(errors.New("image not found with digest")) + return containerdimages.Image{}, errdefs.NotFound(errors.New("image not found with digest")) } return imgs[0], nil @@ -170,11 +166,11 @@ func (i *ImageService) resolveImageName2(ctx context.Context, refOrID string) (i } imgs, err := is.List(ctx, filters...) 
if err != nil { - return img, err + return containerdimages.Image{}, err } if len(imgs) == 0 { - return img, errdefs.NotFound(errors.New("list returned no images")) + return containerdimages.Image{}, errdefs.NotFound(errors.New("list returned no images")) } if len(imgs) > 1 { digests := map[digest.Digest]struct{}{} @@ -186,7 +182,7 @@ func (i *ImageService) resolveImageName2(ctx context.Context, refOrID string) (i } if len(digests) > 1 { - return img, errdefs.NotFound(errors.New("ambiguous reference")) + return containerdimages.Image{}, errdefs.NotFound(errors.New("ambiguous reference")) } } @@ -199,88 +195,14 @@ func (i *ImageService) resolveImageName2(ctx context.Context, refOrID string) (i if err != nil { // TODO(containerd): error translation can use common function if !cerrdefs.IsNotFound(err) { - return img, err + return containerdimages.Image{}, err } - return img, errdefs.NotFound(errors.New("id not found")) + return containerdimages.Image{}, errdefs.NotFound(errors.New("id not found")) } return img, nil } -func (i *ImageService) resolveImageName(ctx context.Context, refOrID string) (ocispec.Descriptor, reference.Named, error) { - parsed, err := reference.ParseAnyReference(refOrID) - if err != nil { - return ocispec.Descriptor{}, nil, errdefs.InvalidParameter(err) - } - - is := i.client.ImageService() - - namedRef, ok := parsed.(reference.Named) - if !ok { - digested, ok := parsed.(reference.Digested) - if !ok { - return ocispec.Descriptor{}, nil, errdefs.InvalidParameter(errors.New("bad reference")) - } - - imgs, err := is.List(ctx, fmt.Sprintf("target.digest==%s", digested.Digest())) - if err != nil { - return ocispec.Descriptor{}, nil, errors.Wrap(err, "failed to lookup digest") - } - if len(imgs) == 0 { - return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("image not found with digest")) - } - - return imgs[0].Target, nil, nil - } - - namedRef = reference.TagNameOnly(namedRef) - - // If the identifier could be a short ID, attempt to match - if 
shortID.MatchString(refOrID) { - ref := namedRef.String() - filters := []string{ - fmt.Sprintf("name==%q", ref), - fmt.Sprintf(`target.digest~=/sha256:%s[0-9a-fA-F]{%d}/`, refOrID, 64-len(refOrID)), - } - imgs, err := is.List(ctx, filters...) - if err != nil { - return ocispec.Descriptor{}, nil, err - } - - if len(imgs) == 0 { - return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("list returned no images")) - } - if len(imgs) > 1 { - digests := map[digest.Digest]struct{}{} - for _, img := range imgs { - if img.Name == ref { - return img.Target, namedRef, nil - } - digests[img.Target.Digest] = struct{}{} - } - - if len(digests) > 1 { - return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("ambiguous reference")) - } - } - - if imgs[0].Name != ref { - namedRef = nil - } - return imgs[0].Target, namedRef, nil - } - img, err := is.Get(ctx, namedRef.String()) - if err != nil { - // TODO(containerd): error translation can use common function - if !cerrdefs.IsNotFound(err) { - return ocispec.Descriptor{}, nil, err - } - return ocispec.Descriptor{}, nil, errdefs.NotFound(errors.New("id not found")) - } - - return img.Target, namedRef, nil -} - // PresentChildrenHandler traverses recursively all children descriptors that are present in the store. func (i *ImageService) presentChildrenHandler() containerdimages.HandlerFunc { store := i.client.ContentStore() diff --git a/daemon/containerd/image_tag.go b/daemon/containerd/image_tag.go index 5bcf46a3a8576..a007490cd36e3 100644 --- a/daemon/containerd/image_tag.go +++ b/daemon/containerd/image_tag.go @@ -12,7 +12,7 @@ import ( // TagImage creates the tag specified by newTag, pointing to the image named // imageName (alternatively, imageName can also be an image ID). 
func (i *ImageService) TagImage(ctx context.Context, imageName, repository, tag string) (string, error) { - desc, err := i.ResolveImage(ctx, imageName) + img, err := i.resolveImage(ctx, imageName) if err != nil { return "", err } @@ -27,7 +27,7 @@ func (i *ImageService) TagImage(ctx context.Context, imageName, repository, tag } } - err = i.TagImageWithReference(ctx, image.ID(desc.Digest), newTag) + err = i.TagImageWithReference(ctx, image.ID(img.Target.Digest), newTag) return reference.FamiliarString(newTag), err } From 3616b07f750994757548096a86d91f7cfbf807b5 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Tue, 13 Sep 2022 12:22:09 +0200 Subject: [PATCH 81/90] detect HTTP response to HTTPS request Signed-off-by: Nicolas De Loof --- daemon/containerd/resolver.go | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/daemon/containerd/resolver.go b/daemon/containerd/resolver.go index a4ffade25c551..83ed5272df805 100644 --- a/daemon/containerd/resolver.go +++ b/daemon/containerd/resolver.go @@ -1,8 +1,8 @@ package containerd import ( + "crypto/tls" "net/http" - "strings" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" @@ -73,16 +73,15 @@ type httpFallback struct { func (f httpFallback) RoundTrip(r *http.Request) (*http.Response, error) { resp, err := f.super.RoundTrip(r) - if err != nil { - if strings.Contains(err.Error(), "http: server gave HTTP response to HTTPS client") { - plainHttpUrl := *r.URL - plainHttpUrl.Scheme = "http" + if tlsErr, ok := err.(tls.RecordHeaderError); ok && string(tlsErr.RecordHeader[:]) == "HTTP/" { + // server gave HTTP response to HTTPS client + plainHttpUrl := *r.URL + plainHttpUrl.Scheme = "http" - plainHttpRequest := *r - plainHttpRequest.URL = &plainHttpUrl + plainHttpRequest := *r + plainHttpRequest.URL = &plainHttpUrl - return http.DefaultTransport.RoundTrip(&plainHttpRequest) - } + return http.DefaultTransport.RoundTrip(&plainHttpRequest) } return resp, 
err From 98e504a434447a09d2aa0946d74e6c23d99c9acb Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Tue, 13 Sep 2022 16:37:04 +0200 Subject: [PATCH 82/90] Return the image ID on inspect Signed-off-by: Djordje Lukic --- daemon/containerd/image.go | 44 ++++++++++++++++++++------------------ image/image.go | 7 ++++++ 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index aecb48bac7c16..76419973486c1 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -68,7 +68,7 @@ func (i *ImageService) GetImage(ctx context.Context, refOrID string, options ima } func (i *ImageService) getImage(ctx context.Context, refOrID string) (containerd.Image, *image.Image, error) { - img, err := i.resolveImage(ctx, refOrID) + c8dImg, err := i.resolveImage(ctx, refOrID) if err != nil { return nil, nil, err } @@ -78,15 +78,15 @@ func (i *ImageService) getImage(ctx context.Context, refOrID string) (containerd if err != nil { return nil, nil, err } - ii := containerd.NewImage(i.client, ctrdimg) + containerdImage := containerd.NewImage(i.client, ctrdimg) provider := i.client.ContentStore() - conf, err := ctrdimg.Config(ctx, provider, ii.Platform()) + conf, err := ctrdimg.Config(ctx, provider, containerdImage.Platform()) if err != nil { return nil, nil, err } var ociimage ocispec.Image - imageConfigBytes, err := content.ReadBlob(ctx, ii.ContentStore(), conf) + imageConfigBytes, err := content.ReadBlob(ctx, containerdImage.ContentStore(), conf) if err != nil { return nil, nil, err } @@ -95,7 +95,7 @@ func (i *ImageService) getImage(ctx context.Context, refOrID string) (containerd return nil, nil, err } - fs, err := ii.RootFS(ctx) + fs, err := containerdImage.RootFS(ctx) if err != nil { return nil, nil, err } @@ -107,23 +107,25 @@ func (i *ImageService) getImage(ctx context.Context, refOrID string) (containerd for k, v := range ociimage.Config.ExposedPorts { exposedPorts[nat.Port(k)] = v } - return ii, 
&image.Image{ - V1Image: image.V1Image{ - ID: string(img.Target.Digest), - OS: ociimage.OS, - Architecture: ociimage.Architecture, - Config: &containertypes.Config{ - Entrypoint: ociimage.Config.Entrypoint, - Env: ociimage.Config.Env, - Cmd: ociimage.Config.Cmd, - User: ociimage.Config.User, - WorkingDir: ociimage.Config.WorkingDir, - ExposedPorts: exposedPorts, - Volumes: ociimage.Config.Volumes, - }, + + img := image.NewImage(image.IDFromDigest(c8dImg.Target.Digest)) + img.V1Image = image.V1Image{ + ID: string(c8dImg.Target.Digest), + OS: ociimage.OS, + Architecture: ociimage.Architecture, + Config: &containertypes.Config{ + Entrypoint: ociimage.Config.Entrypoint, + Env: ociimage.Config.Env, + Cmd: ociimage.Config.Cmd, + User: ociimage.Config.User, + WorkingDir: ociimage.Config.WorkingDir, + ExposedPorts: exposedPorts, + Volumes: ociimage.Config.Volumes, }, - RootFS: rootfs, - }, nil + } + img.RootFS = rootfs + + return containerdImage, img, nil } // resolveImage searches for an image based on the given diff --git a/image/image.go b/image/image.go index 0c6bf257f805a..af9563eeee364 100644 --- a/image/image.go +++ b/image/image.go @@ -201,6 +201,13 @@ type ChildConfig struct { Config *container.Config } +// NewImage creates a new image with the given ID +func NewImage(id ID) *Image { + return &Image{ + computedID: id, + } +} + // NewChildImage creates a new Image as a child of this image. 
func NewChildImage(img *Image, child ChildConfig, os string) *Image { isEmptyLayer := layer.IsEmpty(child.DiffID) From 98e2baeb90ac89b79d12f521105ec8a5ab53aae9 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Mon, 12 Sep 2022 10:10:29 +0200 Subject: [PATCH 83/90] introduce Changes in image service API Signed-off-by: Nicolas De Loof --- api/server/router/container/backend.go | 2 +- .../router/container/container_routes.go | 2 +- daemon/changes.go | 14 ++++-------- daemon/containerd/image_changes.go | 13 +++++++++++ daemon/image_service.go | 2 ++ daemon/images/image_changes.go | 19 ++++++++++++++++ install.sh | 22 +++++++++++++++++++ 7 files changed, 62 insertions(+), 12 deletions(-) create mode 100644 daemon/containerd/image_changes.go create mode 100644 daemon/images/image_changes.go create mode 100755 install.sh diff --git a/api/server/router/container/backend.go b/api/server/router/container/backend.go index 0c53282b38302..f86827b891f1d 100644 --- a/api/server/router/container/backend.go +++ b/api/server/router/container/backend.go @@ -48,7 +48,7 @@ type stateBackend interface { // monitorBackend includes functions to implement to provide containers monitoring functionality. 
type monitorBackend interface { - ContainerChanges(name string) ([]archive.Change, error) + ContainerChanges(ctx context.Context, name string) ([]archive.Change, error) ContainerInspect(ctx context.Context, name string, size bool, version string) (interface{}, error) ContainerLogs(ctx context.Context, name string, config *types.ContainerLogsOptions) (msgs <-chan *backend.LogMessage, tty bool, err error) ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go index 6de22a451157a..1145aedf7749c 100644 --- a/api/server/router/container/container_routes.go +++ b/api/server/router/container/container_routes.go @@ -397,7 +397,7 @@ func (s *containerRouter) postContainersWait(ctx context.Context, w http.Respons } func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - changes, err := s.backend.ContainerChanges(vars["name"]) + changes, err := s.backend.ContainerChanges(ctx, vars["name"]) if err != nil { return err } diff --git a/daemon/changes.go b/daemon/changes.go index 3e54cb789c02a..3d5ad59dfa1c8 100644 --- a/daemon/changes.go +++ b/daemon/changes.go @@ -1,6 +1,7 @@ package daemon // import "github.com/docker/docker/daemon" import ( + "context" "errors" "time" @@ -8,8 +9,9 @@ import ( ) // ContainerChanges returns a list of container fs changes -func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { +func (daemon *Daemon) ContainerChanges(ctx context.Context, name string) ([]archive.Change, error) { start := time.Now() + container, err := daemon.GetContainer(name) if err != nil { return nil, err @@ -19,15 +21,7 @@ func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { return nil, errors.New("Windows does not support diff of a running container") } - container.Lock() - defer 
container.Unlock() - if daemon.UsesSnapshotter() { - return nil, errors.New("not implemented") - } - if container.RWLayer == nil { - return nil, errors.New("RWLayer of container " + name + " is unexpectedly nil") - } - c, err := container.RWLayer.Changes() + c, err := daemon.imageService.Changes(ctx, container) if err != nil { return nil, err } diff --git a/daemon/containerd/image_changes.go b/daemon/containerd/image_changes.go new file mode 100644 index 0000000000000..45a54f287b1bb --- /dev/null +++ b/daemon/containerd/image_changes.go @@ -0,0 +1,13 @@ +package containerd + +import ( + "context" + "errors" + + "github.com/docker/docker/container" + "github.com/docker/docker/pkg/archive" +) + +func (i *ImageService) Changes(ctx context.Context, container *container.Container) ([]archive.Change, error) { + return nil, errors.New("not implemented (yet)") +} diff --git a/daemon/image_service.go b/daemon/image_service.go index 50e583af75f2a..e58a2c1fac221 100644 --- a/daemon/image_service.go +++ b/daemon/image_service.go @@ -16,6 +16,7 @@ import ( "github.com/docker/docker/daemon/images" "github.com/docker/docker/image" "github.com/docker/docker/layer" + "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/containerfs" v1 "github.com/opencontainers/image-spec/specs-go/v1" ) @@ -56,6 +57,7 @@ type ImageService interface { GetContainerLayerSize(ctx context.Context, containerID string) (int64, int64, error) Mount(ctx context.Context, container *container.Container) error Unmount(ctx context.Context, container *container.Container) error + Changes(ctx context.Context, container *container.Container) ([]archive.Change, error) // Windows specific diff --git a/daemon/images/image_changes.go b/daemon/images/image_changes.go new file mode 100644 index 0000000000000..2dff50b7f0923 --- /dev/null +++ b/daemon/images/image_changes.go @@ -0,0 +1,19 @@ +package images + +import ( + "context" + "errors" + + "github.com/docker/docker/container" + 
"github.com/docker/docker/pkg/archive" +) + +func (i *ImageService) Changes(ctx context.Context, container *container.Container) ([]archive.Change, error) { + container.Lock() + defer container.Unlock() + + if container.RWLayer == nil { + return nil, errors.New("RWLayer of container " + container.Name + " is unexpectedly nil") + } + return container.RWLayer.Changes() +} diff --git a/install.sh b/install.sh new file mode 100755 index 0000000000000..b602a121a68cf --- /dev/null +++ b/install.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env zsh +set -xe + +DOCKER_LINKMODE=dynamic docker buildx bake + +killall "Docker" || true +killall "Docker Desktop" || true + +DESKTOP_PATH="/Applications/Docker.app" +SOURCE_PATH=$(pwd) +TAR_TMP=$(mktemp -d) + +cp "${DESKTOP_PATH}/Contents/Resources/linuxkit/services.tar" "${TAR_TMP}" +tar -C "${TAR_TMP}" -xf "${TAR_TMP}/services.tar" +rm "${TAR_TMP}/services.tar" +cp "${SOURCE_PATH}/bundles/binary-daemon/dockerd" "${TAR_TMP}/containers/services/docker/lower/usr/bin/dockerd-c8d" +tar -C "${TAR_TMP}" -cf "${TAR_TMP}/services.tar" containers + +# Make sure that Docker Desktop is not running +sleep 10 +cp "${TAR_TMP}/services.tar" "${DESKTOP_PATH}/Contents/Resources/linuxkit/services.tar" +open /Applications/Docker.app From 90f6cee634f3e89c1b408a071aad79575a26fce1 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Thu, 15 Sep 2022 10:56:35 +0200 Subject: [PATCH 84/90] add support for docker diff Signed-off-by: Nicolas De Loof --- daemon/containerd/image_changes.go | 42 ++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/daemon/containerd/image_changes.go b/daemon/containerd/image_changes.go index 45a54f287b1bb..bbed1eb7f0728 100644 --- a/daemon/containerd/image_changes.go +++ b/daemon/containerd/image_changes.go @@ -2,12 +2,50 @@ package containerd import ( "context" - "errors" + "github.com/containerd/containerd" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/platforms" 
"github.com/docker/docker/container" "github.com/docker/docker/pkg/archive" + "github.com/google/uuid" + "github.com/opencontainers/image-spec/identity" ) func (i *ImageService) Changes(ctx context.Context, container *container.Container) ([]archive.Change, error) { - return nil, errors.New("not implemented (yet)") + snapshotter := i.client.SnapshotService(i.snapshotter) + mounts, err := snapshotter.Mounts(ctx, container.ID) + if err != nil { + return nil, err + } + + cimg, _, err := i.getImage(ctx, container.Config.Image) + if err != nil { + return nil, err + } + baseImgWithoutPlatform, err := i.client.ImageService().Get(ctx, cimg.Name()) + if err != nil { + return nil, err + } + baseImg := containerd.NewImageWithPlatform(i.client, baseImgWithoutPlatform, platforms.DefaultStrict()) + diffIDs, err := baseImg.RootFS(ctx) + rnd, err := uuid.NewRandom() + if err != nil { + return nil, err + } + parent, err := snapshotter.View(ctx, rnd.String(), identity.ChainID(diffIDs).String()) + if err != nil { + return nil, err + } + defer snapshotter.Remove(ctx, rnd.String()) + + var changes []archive.Change + err = mount.WithTempMount(ctx, mounts, func(fs string) error { + return mount.WithTempMount(ctx, parent, func(root string) error { + changes, err = archive.ChangesDirs(fs, root) + return err + }) + return err + }) + return changes, err } From 1e04143e967736b2455e5a889c2ccad4ebe78fb1 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Thu, 15 Sep 2022 15:04:07 +0200 Subject: [PATCH 85/90] mount container's filesystem RO to avoid conflicts Signed-off-by: Nicolas De Loof --- daemon/containerd/image_changes.go | 37 +++++++++++++++++++++++++++++- install.sh | 22 ------------------ 2 files changed, 36 insertions(+), 23 deletions(-) delete mode 100755 install.sh diff --git a/daemon/containerd/image_changes.go b/daemon/containerd/image_changes.go index bbed1eb7f0728..56a8a99f03225 100644 --- a/daemon/containerd/image_changes.go +++ b/daemon/containerd/image_changes.go @@ -2,6 +2,7 
@@ package containerd import ( "context" + "strings" "github.com/containerd/containerd" "github.com/containerd/containerd/mount" @@ -40,7 +41,7 @@ func (i *ImageService) Changes(ctx context.Context, container *container.Contain defer snapshotter.Remove(ctx, rnd.String()) var changes []archive.Change - err = mount.WithTempMount(ctx, mounts, func(fs string) error { + err = mount.WithTempMount(ctx, readOnly(mounts), func(fs string) error { return mount.WithTempMount(ctx, parent, func(root string) error { changes, err = archive.ChangesDirs(fs, root) return err @@ -49,3 +50,37 @@ func (i *ImageService) Changes(ctx context.Context, container *container.Contain }) return changes, err } + +func readOnly(mounts []mount.Mount) []mount.Mount { + for i, m := range mounts { + if m.Type == "overlay" { + opts := make([]string, 0, len(m.Options)) + upper := "" + for _, o := range m.Options { + if strings.HasPrefix(o, "upperdir=") { + upper = strings.TrimPrefix(o, "upperdir=") + } else if !strings.HasPrefix(o, "workdir=") { + opts = append(opts, o) + } + } + if upper != "" { + for i, o := range opts { + if strings.HasPrefix(o, "lowerdir=") { + opts[i] = "lowerdir=" + upper + ":" + strings.TrimPrefix(o, "lowerdir=") + } + } + } + mounts[i].Options = opts + continue + } + opts := make([]string, 0, len(m.Options)) + for _, opt := range m.Options { + if opt != "rw" { + opts = append(opts, opt) + } + } + opts = append(opts, "ro") + mounts[i].Options = opts + } + return mounts +} diff --git a/install.sh b/install.sh deleted file mode 100755 index b602a121a68cf..0000000000000 --- a/install.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env zsh -set -xe - -DOCKER_LINKMODE=dynamic docker buildx bake - -killall "Docker" || true -killall "Docker Desktop" || true - -DESKTOP_PATH="/Applications/Docker.app" -SOURCE_PATH=$(pwd) -TAR_TMP=$(mktemp -d) - -cp "${DESKTOP_PATH}/Contents/Resources/linuxkit/services.tar" "${TAR_TMP}" -tar -C "${TAR_TMP}" -xf "${TAR_TMP}/services.tar" -rm 
"${TAR_TMP}/services.tar" -cp "${SOURCE_PATH}/bundles/binary-daemon/dockerd" "${TAR_TMP}/containers/services/docker/lower/usr/bin/dockerd-c8d" -tar -C "${TAR_TMP}" -cf "${TAR_TMP}/services.tar" containers - -# Make sure that Docker Desktop is not running -sleep 10 -cp "${TAR_TMP}/services.tar" "${DESKTOP_PATH}/Contents/Resources/linuxkit/services.tar" -open /Applications/Docker.app From db1e32da79c2426d7f13a6a11162a28f1ee45c73 Mon Sep 17 00:00:00 2001 From: Djordje Lukic Date: Thu, 15 Sep 2022 15:27:26 +0200 Subject: [PATCH 86/90] Make sure the mount is readonly when searching for a user/group Mounting the same directory twice with overlayfs will put the mounts in an undefined behavior. We need to make sure that the mounts we do are read-only. Signed-off-by: Djordje Lukic --- daemon/containerd/image_changes.go | 86 +++---- daemon/exec_linux.go | 27 +-- daemon/oci_linux.go | 2 +- oci/oci.go | 363 +++++++++++++++++++++++++++++ 4 files changed, 403 insertions(+), 75 deletions(-) diff --git a/daemon/containerd/image_changes.go b/daemon/containerd/image_changes.go index 56a8a99f03225..105e74e320a91 100644 --- a/daemon/containerd/image_changes.go +++ b/daemon/containerd/image_changes.go @@ -2,85 +2,61 @@ package containerd import ( "context" - "strings" + "fmt" "github.com/containerd/containerd" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/platforms" "github.com/docker/docker/container" + "github.com/docker/docker/oci" "github.com/docker/docker/pkg/archive" "github.com/google/uuid" "github.com/opencontainers/image-spec/identity" ) -func (i *ImageService) Changes(ctx context.Context, container *container.Container) ([]archive.Change, error) { +func (i *ImageService) Changes(ctx context.Context, container *container.Container) (changes []archive.Change, err error) { snapshotter := i.client.SnapshotService(i.snapshotter) - mounts, err := snapshotter.Mounts(ctx, container.ID) + mounts, uerr := snapshotter.Mounts(ctx, container.ID) if err != nil 
{ - return nil, err + return nil, uerr } - cimg, _, err := i.getImage(ctx, container.Config.Image) - if err != nil { - return nil, err + cimg, _, uerr := i.getImage(ctx, container.Config.Image) + if uerr != nil { + return nil, uerr } - baseImgWithoutPlatform, err := i.client.ImageService().Get(ctx, cimg.Name()) - if err != nil { - return nil, err + baseImgWithoutPlatform, uerr := i.client.ImageService().Get(ctx, cimg.Name()) + if uerr != nil { + return nil, uerr } baseImg := containerd.NewImageWithPlatform(i.client, baseImgWithoutPlatform, platforms.DefaultStrict()) - diffIDs, err := baseImg.RootFS(ctx) - rnd, err := uuid.NewRandom() - if err != nil { - return nil, err + diffIDs, uerr := baseImg.RootFS(ctx) + if uerr != nil { + return nil, uerr } - parent, err := snapshotter.View(ctx, rnd.String(), identity.ChainID(diffIDs).String()) - if err != nil { - return nil, err + rnd, uerr := uuid.NewRandom() + if uerr != nil { + return nil, uerr + } + parent, uerr := snapshotter.View(ctx, rnd.String(), identity.ChainID(diffIDs).String()) + if uerr != nil { + return nil, uerr } - defer snapshotter.Remove(ctx, rnd.String()) + defer func() { + uerr = snapshotter.Remove(ctx, rnd.String()) + if err == nil { + err = uerr + } else { + err = fmt.Errorf("%s: %w", uerr.Error(), err) + } + }() - var changes []archive.Change - err = mount.WithTempMount(ctx, readOnly(mounts), func(fs string) error { + err = mount.WithTempMount(ctx, oci.ReadonlyMounts(mounts), func(fs string) error { return mount.WithTempMount(ctx, parent, func(root string) error { changes, err = archive.ChangesDirs(fs, root) return err }) - return err }) - return changes, err -} -func readOnly(mounts []mount.Mount) []mount.Mount { - for i, m := range mounts { - if m.Type == "overlay" { - opts := make([]string, 0, len(m.Options)) - upper := "" - for _, o := range m.Options { - if strings.HasPrefix(o, "upperdir=") { - upper = strings.TrimPrefix(o, "upperdir=") - } else if !strings.HasPrefix(o, "workdir=") { - opts = 
append(opts, o) - } - } - if upper != "" { - for i, o := range opts { - if strings.HasPrefix(o, "lowerdir=") { - opts[i] = "lowerdir=" + upper + ":" + strings.TrimPrefix(o, "lowerdir=") - } - } - } - mounts[i].Options = opts - continue - } - opts := make([]string, 0, len(m.Options)) - for _, opt := range m.Options { - if opt != "rw" { - opts = append(opts, opt) - } - } - opts = append(opts, "ro") - mounts[i].Options = opts - } - return mounts + return changes, err } diff --git a/daemon/exec_linux.go b/daemon/exec_linux.go index f51b105ecedb3..cad6d487c5747 100644 --- a/daemon/exec_linux.go +++ b/daemon/exec_linux.go @@ -3,26 +3,15 @@ package daemon // import "github.com/docker/docker/daemon" import ( "context" - "github.com/containerd/containerd/containers" - "github.com/containerd/containerd/oci" coci "github.com/containerd/containerd/oci" "github.com/containerd/containerd/pkg/apparmor" "github.com/docker/docker/container" "github.com/docker/docker/daemon/exec" + "github.com/docker/docker/oci" "github.com/docker/docker/oci/caps" specs "github.com/opencontainers/runtime-spec/specs-go" ) -// withResetAdditionalGIDs resets additonal GIDs -// This code is based nerdctl, under Apache License -// https://github.com/containerd/nerdctl/blob/2bbd998a1c95e6682120918d9a07a24ccef4f5fb/cmd/nerdctl/run_user.go#L69 -func withResetAdditionalGIDs() oci.SpecOpts { - return func(_ context.Context, _ oci.Client, _ *containers.Container, s *oci.Spec) error { - s.Process.User.AdditionalGids = nil - return nil - } -} - func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, c *container.Container, ec *exec.Config, p *specs.Process) error { if len(ec.User) > 0 { if daemon.UsesSnapshotter() { @@ -30,19 +19,19 @@ func (daemon *Daemon) execSetPlatformOpt(ctx context.Context, c *container.Conta if err != nil { return err } - ci, err := cc.Info(ctx) + spec, err := cc.Spec(ctx) if err != nil { return err } - spec, err := cc.Spec(ctx) + opts := []coci.SpecOpts{ + oci.WithUser(ec.User), + 
oci.WithResetAdditionalGIDs(), + oci.WithAdditionalGIDs(ec.User), + } + ci, err := cc.Info(ctx) if err != nil { return err } - opts := []oci.SpecOpts{ - coci.WithUser(ec.User), - withResetAdditionalGIDs(), - coci.WithAdditionalGIDs(ec.User), - } for _, opt := range opts { if err := opt(ctx, daemon.containerdCli, &ci, spec); err != nil { return err diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go index 71c3de05ada06..6a7ffeaf26212 100644 --- a/daemon/oci_linux.go +++ b/daemon/oci_linux.go @@ -1035,7 +1035,7 @@ func (daemon *Daemon) createSpec(ctx context.Context, c *container.Container) (r Path: "rootfs", } if c.Config.User != "" { - opts = append(opts, coci.WithUser(c.Config.User)) + opts = append(opts, oci.WithUser(c.Config.User)) } if c.Config.WorkingDir != "" { opts = append(opts, coci.WithProcessCwd(c.Config.WorkingDir)) } diff --git a/oci/oci.go b/oci/oci.go index 2021ec3538fdd..8ec9fdf669834 100644 --- a/oci/oci.go +++ b/oci/oci.go @@ -1,10 +1,21 @@ package oci // import "github.com/docker/docker/oci" import ( + "context" + "errors" "fmt" + "os" + "path/filepath" "regexp" + "runtime" "strconv" + "strings" + "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/mount" + coci "github.com/containerd/containerd/oci" + "github.com/containerd/continuity/fs" + libcontainer "github.com/opencontainers/runc/libcontainer/user" specs "github.com/opencontainers/runtime-spec/specs-go" ) @@ -75,3 +86,355 @@ func AppendDevicePermissionsFromCgroupRules(devPermissions []specs.LinuxDeviceCg } return devPermissions, nil } + +// WithResetAdditionalGIDs resets additional GIDs +// This code is based on nerdctl, under Apache License +// https://github.com/containerd/nerdctl/blob/2bbd998a1c95e6682120918d9a07a24ccef4f5fb/cmd/nerdctl/run_user.go#L69 +func WithResetAdditionalGIDs() coci.SpecOpts { + return func(_ context.Context, _ coci.Client, _ *containers.Container, s *coci.Spec) error { + s.Process.User.AdditionalGids = nil + return nil + } +} + +// 
ReadonlyMounts is used by the options which are trying to get user/group +// information from container's rootfs. Since the option does read operation +// only, this helper will append ReadOnly mount option to prevent linux kernel +// from syncing whole filesystem in umount syscall. This also prevents the +// filesystem to end up in a state with undefined behavior. +func ReadonlyMounts(mounts []mount.Mount) []mount.Mount { + for i, m := range mounts { + if m.Type == "overlay" { + mounts[i].Options = readonlyOverlay(m.Options) + } + } + if len(mounts) == 1 && mounts[0].Type == "overlay" { + mounts[0].Options = append(mounts[0].Options, "ro") + } + return mounts +} + +func readonlyOverlay(opt []string) []string { + out := make([]string, 0, len(opt)) + upper := "" + for _, o := range opt { + if strings.HasPrefix(o, "upperdir=") { + upper = strings.TrimPrefix(o, "upperdir=") + } else if !strings.HasPrefix(o, "workdir=") { + out = append(out, o) + } + } + if upper != "" { + for i, o := range out { + if strings.HasPrefix(o, "lowerdir=") { + out[i] = "lowerdir=" + upper + ":" + strings.TrimPrefix(o, "lowerdir=") + } + } + } + return out +} + +func isRootfsAbs(root string) bool { + return filepath.IsAbs(root) +} + +// setProcess sets Process to empty if unset +func setProcess(s *coci.Spec) { + if s.Process == nil { + s.Process = &specs.Process{} + } +} + +func getSupplementalGroupsFromPath(root string, filter func(libcontainer.Group) bool) ([]uint32, error) { + gpath, err := fs.RootPath(root, "/etc/group") + if err != nil { + return []uint32{}, err + } + groups, err := libcontainer.ParseGroupFileFilter(gpath, filter) + if err != nil { + return []uint32{}, err + } + if len(groups) == 0 { + // if there are no additional groups; just return an empty set + return []uint32{}, nil + } + addlGids := []uint32{} + for _, grp := range groups { + addlGids = append(addlGids, uint32(grp.Gid)) + } + return addlGids, nil +} + +// WithUserID sets the correct UID and GID for the container 
based +// on the image's /etc/passwd contents. If /etc/passwd does not exist, +// or uid is not found in /etc/passwd, it sets the requested uid, +// additionally sets the gid to 0, and does not return an error. +func withUserID(uid uint32) coci.SpecOpts { + return func(ctx context.Context, client coci.Client, c *containers.Container, s *coci.Spec) (err error) { + setProcess(s) + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !isRootfsAbs(s.Root.Path) { + return errors.New("rootfs absolute path is required") + } + user, err := coci.UserFromPath(s.Root.Path, func(u libcontainer.User) bool { + return u.Uid == int(uid) + }) + if err != nil { + if os.IsNotExist(err) || err == coci.ErrNoUsersFound { + s.Process.User.UID, s.Process.User.GID = uid, 0 + return nil + } + return err + } + s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) + return nil + + } + if c.Snapshotter == "" { + return errors.New("no snapshotter set for container") + } + if c.SnapshotKey == "" { + return errors.New("rootfs snapshot not created for container") + } + snapshotter := client.SnapshotService(c.Snapshotter) + mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) + if err != nil { + return err + } + + mounts = ReadonlyMounts(mounts) + return mount.WithTempMount(ctx, mounts, func(root string) error { + user, err := coci.UserFromPath(root, func(u libcontainer.User) bool { + return u.Uid == int(uid) + }) + if err != nil { + if os.IsNotExist(err) || err == coci.ErrNoUsersFound { + s.Process.User.UID, s.Process.User.GID = uid, 0 + return nil + } + return err + } + s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) + return nil + }) + } +} + +// WithUsername sets the correct UID and GID for the container +// based on the image's /etc/passwd contents. If /etc/passwd +// does not exist, or the username is not found in /etc/passwd, +// it returns error. 
On Windows this sets the username as provided, +// the operating system will validate the user when going to run +// the container. +func withUsername(username string) coci.SpecOpts { + return func(ctx context.Context, client coci.Client, c *containers.Container, s *coci.Spec) (err error) { + setProcess(s) + if s.Linux != nil { + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !isRootfsAbs(s.Root.Path) { + return errors.New("rootfs absolute path is required") + } + user, err := coci.UserFromPath(s.Root.Path, func(u libcontainer.User) bool { + return u.Name == username + }) + if err != nil { + return err + } + s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) + return nil + } + if c.Snapshotter == "" { + return errors.New("no snapshotter set for container") + } + if c.SnapshotKey == "" { + return errors.New("rootfs snapshot not created for container") + } + snapshotter := client.SnapshotService(c.Snapshotter) + mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) + if err != nil { + return err + } + + mounts = ReadonlyMounts(mounts) + return mount.WithTempMount(ctx, mounts, func(root string) error { + user, err := coci.UserFromPath(root, func(u libcontainer.User) bool { + return u.Name == username + }) + if err != nil { + return err + } + s.Process.User.UID, s.Process.User.GID = uint32(user.Uid), uint32(user.Gid) + return nil + }) + } else if s.Windows != nil { + s.Process.User.Username = username + } else { + return errors.New("spec does not contain Linux or Windows section") + } + return nil + } +} + +// WithUser sets the user to be used within the container. 
+// It accepts a valid user string in OCI Image Spec v1.0.0: +// user, uid, user:group, uid:gid, uid:group, user:gid +func WithUser(userstr string) coci.SpecOpts { + return func(ctx context.Context, client coci.Client, c *containers.Container, s *coci.Spec) error { + // For LCOW it's a bit harder to confirm that the user actually exists on the host as a rootfs isn't + // mounted on the host and shared into the guest, but rather the rootfs is constructed entirely in the + // guest itself. To accommodate this, a spot to place the user string provided by a client as-is is needed. + // The `Username` field on the runtime spec is marked by Platform as only for Windows, and in this case it + // *is* being set on a Windows host at least, but will be used as a temporary holding spot until the guest + // can use the string to perform these same operations to grab the uid:gid inside. + if s.Windows != nil && s.Linux != nil { + s.Process.User.Username = userstr + return nil + } + + parts := strings.Split(userstr, ":") + switch len(parts) { + case 1: + v, err := strconv.Atoi(parts[0]) + if err != nil { + // if we cannot parse as a uint they try to see if it is a username + return withUsername(userstr)(ctx, client, c, s) + } + return withUserID(uint32(v))(ctx, client, c, s) + case 2: + var ( + username string + groupname string + ) + var uid, gid uint32 + v, err := strconv.Atoi(parts[0]) + if err != nil { + username = parts[0] + } else { + uid = uint32(v) + } + if v, err = strconv.Atoi(parts[1]); err != nil { + groupname = parts[1] + } else { + gid = uint32(v) + } + if username == "" && groupname == "" { + s.Process.User.UID, s.Process.User.GID = uid, gid + return nil + } + f := func(root string) error { + if username != "" { + user, err := coci.UserFromPath(root, func(u libcontainer.User) bool { + return u.Name == username + }) + if err != nil { + return err + } + uid = uint32(user.Uid) + } + if groupname != "" { + gid, err = coci.GIDFromPath(root, func(g libcontainer.Group) 
bool { + return g.Name == groupname + }) + if err != nil { + return err + } + } + s.Process.User.UID, s.Process.User.GID = uid, gid + return nil + } + if c.Snapshotter == "" && c.SnapshotKey == "" { + if !isRootfsAbs(s.Root.Path) { + return errors.New("rootfs absolute path is required") + } + return f(s.Root.Path) + } + if c.Snapshotter == "" { + return errors.New("no snapshotter set for container") + } + if c.SnapshotKey == "" { + return errors.New("rootfs snapshot not created for container") + } + snapshotter := client.SnapshotService(c.Snapshotter) + mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey) + if err != nil { + return err + } + + mounts = ReadonlyMounts(mounts) + return mount.WithTempMount(ctx, mounts, f) + default: + return fmt.Errorf("invalid USER value %s", userstr) + } + } +} + +// WithAdditionalGIDs sets the OCI spec's additionalGids array to any additional groups listed +// for a particular user in the /etc/groups file of the image's root filesystem +// The passed in user can be either a uid or a username. 
func WithAdditionalGIDs(userstr string) coci.SpecOpts {
	return func(ctx context.Context, client coci.Client, c *containers.Container, s *coci.Spec) (err error) {
		// Additional GIDs are not supported for LCOW or on Darwin.
		if s.Windows != nil || runtime.GOOS == "darwin" {
			return nil
		}
		// Make sure the spec's Process section exists before mutating User.
		setProcess(s)
		// setAdditionalGids resolves userstr against the user/group databases
		// under root and stores the supplemental group ids on the spec.
		setAdditionalGids := func(root string) error {
			var username string
			uid, err := strconv.Atoi(userstr)
			if err == nil {
				// Numeric input: map the uid back to a username via the rootfs.
				user, err := coci.UserFromPath(root, func(u libcontainer.User) bool {
					return u.Uid == uid
				})
				if err != nil {
					// A missing passwd database (or one with no users) is not
					// fatal; just leave AdditionalGids unset.
					if os.IsNotExist(err) || err == coci.ErrNoUsersFound {
						return nil
					}
					return err
				}
				username = user.Name
			} else {
				username = userstr
			}
			gids, err := getSupplementalGroupsFromPath(root, func(g libcontainer.Group) bool {
				// we only want supplemental groups
				if g.Name == username {
					return false
				}
				for _, entry := range g.List {
					if entry == username {
						return true
					}
				}
				return false
			})
			if err != nil {
				// No group database present: treat as "no supplemental groups".
				if os.IsNotExist(err) {
					return nil
				}
				return err
			}
			s.Process.User.AdditionalGids = gids
			return nil
		}
		// No snapshotter and no snapshot key: the rootfs must already be
		// materialized on disk, so read from it directly.
		if c.Snapshotter == "" && c.SnapshotKey == "" {
			if !isRootfsAbs(s.Root.Path) {
				return errors.New("rootfs absolute path is required")
			}
			return setAdditionalGids(s.Root.Path)
		}
		// Snapshot-backed rootfs requires both fields to be populated.
		if c.Snapshotter == "" {
			return errors.New("no snapshotter set for container")
		}
		if c.SnapshotKey == "" {
			return errors.New("rootfs snapshot not created for container")
		}
		snapshotter := client.SnapshotService(c.Snapshotter)
		mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
		if err != nil {
			return err
		}

		// Mount the snapshot read-only at a temporary location so the lookup
		// cannot modify the container's filesystem.
		mounts = ReadonlyMounts(mounts)
		return mount.WithTempMount(ctx, mounts, setAdditionalGids)
	}
}
From 154dab90b3419f2c4626e32b33950d570e60868c Mon Sep 17 00:00:00 2001
From: Nicolas De Loof
Date: Thu, 22 Sep 2022 15:50:07 +0200
Subject: [PATCH 87/90] consider digest and ignore tag when both are set

Signed-off-by: Nicolas De Loof
---
 daemon/containerd/image.go | 10
+++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/daemon/containerd/image.go b/daemon/containerd/image.go index 76419973486c1..2d17e12ba22d3 100644 --- a/daemon/containerd/image.go +++ b/daemon/containerd/image.go @@ -139,13 +139,8 @@ func (i *ImageService) resolveImage(ctx context.Context, refOrID string) (img co is := i.client.ImageService() - namedRef, ok := parsed.(reference.Named) - if !ok { - digested, ok := parsed.(reference.Digested) - if !ok { - return containerdimages.Image{}, errdefs.InvalidParameter(errors.New("bad reference")) - } - + digested, ok := parsed.(reference.Digested) + if ok { imgs, err := is.List(ctx, fmt.Sprintf("target.digest==%s", digested.Digest())) if err != nil { return containerdimages.Image{}, errors.Wrap(err, "failed to lookup digest") @@ -157,6 +152,7 @@ func (i *ImageService) resolveImage(ctx context.Context, refOrID string) (img co return imgs[0], nil } + namedRef, ok := parsed.(reference.Named) namedRef = reference.TagNameOnly(namedRef) // If the identifier could be a short ID, attempt to match From 77a67b54cc97ff20ec833a73e06a11ae4135c870 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Mon, 26 Sep 2022 14:25:31 +0200 Subject: [PATCH 88/90] image ls to return familiar names Signed-off-by: Nicolas De Loof --- daemon/containerd/image_list.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/daemon/containerd/image_list.go b/daemon/containerd/image_list.go index f1d80d15a6e5c..a0257b7117135 100644 --- a/daemon/containerd/image_list.go +++ b/daemon/containerd/image_list.go @@ -90,9 +90,16 @@ func (i *ImageService) Images(ctx context.Context, opts types.ImageListOptions) return nil, err } + ref, err := reference.ParseNormalizedNamed(img.Name()) + if err != nil { + return nil, err + } + + familiarName := reference.FamiliarString(ref) + ret = append(ret, &types.ImageSummary{ - RepoDigests: []string{img.Name() + "@" + img.Target().Digest.String()}, // 
"hello-world@sha256:bfea6278a0a267fad2634554f4f0c6f31981eea41c553fdf5a83e95a41d40c38"}, - RepoTags: []string{img.Name()}, + RepoDigests: []string{familiarName + "@" + img.Target().Digest.String()}, // "hello-world@sha256:bfea6278a0a267fad2634554f4f0c6f31981eea41c553fdf5a83e95a41d40c38"}, + RepoTags: []string{familiarName}, Containers: -1, ParentID: "", SharedSize: -1, From 950d878b1edcd6454a460f8d9e8c13dcc2ee83c3 Mon Sep 17 00:00:00 2001 From: CrazyMax Date: Thu, 6 Oct 2022 17:21:13 +0200 Subject: [PATCH 89/90] ci: enable containerd worker tests for buildkit Signed-off-by: CrazyMax --- .github/workflows/ci.yml | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a6b13c4a4e0dc..6d7aa784bb658 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -107,6 +107,9 @@ jobs: strategy: fail-fast: false matrix: + worker: + - dockerd + - containerd pkg: - ./client - ./cmd/buildctl @@ -152,9 +155,17 @@ jobs: name: binary path: ./buildkit/build/moby/ - - name: Update daemon.json + name: Prepare run: | - sudo rm /etc/docker/daemon.json + if [ ! -e /etc/docker/daemon.json ]; then + echo '{}' | sudo tee /etc/docker/daemon.json >/dev/null + fi + if [ "${{ matrix.worker }}" = "containerd" ]; then + DOCKERD_CONFIG=$(jq '.+{"features":{"containerd-snapshotter", true}}' /etc/docker/daemon.json) + sudo tee /etc/docker/daemon.json <<<"$DOCKERD_CONFIG" >/dev/null + else + echo "TEST_DOCKERD=1" >> $GITHUB_ENV + fi sudo service docker restart docker version docker info @@ -164,9 +175,8 @@ jobs: ./hack/test ${{ matrix.typ }} env: CONTEXT: "." 
- TEST_DOCKERD: "1" TEST_DOCKERD_BINARY: "./build/moby/binary-daemon/dockerd" TESTPKGS: "${{ matrix.pkg }}" - TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=dockerd$" + TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=${{ matrix.worker }}$" SKIP_INTEGRATION_TESTS: "${{ matrix.skip-integration-tests }}" working-directory: buildkit From d0a50e48cb424b8861320111441daaa63a348144 Mon Sep 17 00:00:00 2001 From: CrazyMax Date: Thu, 6 Oct 2022 18:55:31 +0200 Subject: [PATCH 90/90] use buildkit fork to set containerd-snapshotter feature Signed-off-by: CrazyMax --- .github/workflows/ci.yml | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6d7aa784bb658..a3e6b0942843f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -139,8 +139,9 @@ jobs: name: Checkout BuildKit ${{ env.BUILDKIT_REF }} uses: actions/checkout@v3 with: - repository: "moby/buildkit" - ref: ${{ env.BUILDKIT_REF }} + #repository: "moby/buildkit" + repository: "crazy-max/buildkit" + ref: "dockerd-test-c8d" path: buildkit - name: Set up QEMU @@ -157,24 +158,20 @@ jobs: - name: Prepare run: | - if [ ! -e /etc/docker/daemon.json ]; then - echo '{}' | sudo tee /etc/docker/daemon.json >/dev/null - fi - if [ "${{ matrix.worker }}" = "containerd" ]; then - DOCKERD_CONFIG=$(jq '.+{"features":{"containerd-snapshotter", true}}' /etc/docker/daemon.json) - sudo tee /etc/docker/daemon.json <<<"$DOCKERD_CONFIG" >/dev/null - else - echo "TEST_DOCKERD=1" >> $GITHUB_ENV - fi + sudo rm /etc/docker/daemon.json sudo service docker restart docker version docker info + if [ "${{ matrix.worker }}" = "containerd" ]; then + echo "TEST_DOCKERD_CONTAINERD_SNAPSHOTTER=1" >> $GITHUB_ENV + fi - name: Test run: | ./hack/test ${{ matrix.typ }} env: CONTEXT: "." 
+ TEST_DOCKERD: "1" TEST_DOCKERD_BINARY: "./build/moby/binary-daemon/dockerd" TESTPKGS: "${{ matrix.pkg }}" TESTFLAGS: "-v --parallel=1 --timeout=30m --run=//worker=${{ matrix.worker }}$"