diff --git a/cli/command/image/build.go b/cli/command/image/build.go
index 19865717a61c..11c4acb26606 100644
--- a/cli/command/image/build.go
+++ b/cli/command/image/build.go
@@ -378,13 +378,13 @@ func runBuild(dockerCli command.Cli, options buildOptions) error {
 
 	if s != nil {
 		go func() {
-			logrus.Debugf("running session: %v", s.UUID())
+			logrus.Debugf("running session: %v", s.ID())
 			if err := s.Run(ctx, dockerCli.Client().DialSession); err != nil {
 				logrus.Error(err)
 				cancel() // cancel progress context
 			}
 		}()
-		buildOptions.SessionID = s.UUID()
+		buildOptions.SessionID = s.ID()
 	}
 
 	response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions)
diff --git a/cli/command/image/build_session.go b/cli/command/image/build_session.go
index 4281f9fe6dc2..a73948c8839f 100644
--- a/cli/command/image/build_session.go
+++ b/cli/command/image/build_session.go
@@ -21,6 +21,7 @@ import (
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/session/filesync"
 	"github.com/pkg/errors"
+	"github.com/tonistiigi/fsutil"
 	"golang.org/x/time/rate"
 )
 
@@ -52,8 +53,20 @@ func addDirToSession(session *session.Session, contextDir string, progressOutput
 	}
 
 	p := &sizeProgress{out: progressOutput, action: "Streaming build context to Docker daemon"}
 
+	mapUIDAndGID := func(s *fsutil.Stat) bool {
+		s.Uid = uint32(0)
+		s.Gid = uint32(0)
-	workdirProvider := filesync.NewFSSyncProvider(contextDir, excludes)
+		return true
+	}
+
+	workdirProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{
+		{
+			Dir:      contextDir,
+			Excludes: excludes,
+			Map:      mapUIDAndGID,
+		},
+	})
 	session.Allow(workdirProvider)
 
 	// this will be replaced on parallel build jobs. keep the current
diff --git a/cli/command/image/build_session_test.go b/cli/command/image/build_session_test.go
new file mode 100644
index 000000000000..1f20b5e48eec
--- /dev/null
+++ b/cli/command/image/build_session_test.go
@@ -0,0 +1,37 @@
+package image
+
+import (
+	"bytes"
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/gotestyourself/gotestyourself/fs"
+	"github.com/moby/buildkit/session"
+	"github.com/stretchr/testify/require"
+)
+
+func TestAddDirToSession(t *testing.T) {
+	dest := fs.NewDir(t, "test-build-session",
+		fs.WithFile("Dockerfile", `
+	FROM alpine:3.6
+	COPY foo /
+	`),
+		fs.WithFile("foo", "some content", fs.AsUser(65534, 65534)),
+	)
+	defer dest.Remove()
+
+	contextDir := dest.Path()
+	sharedKey, err := getBuildSharedKey(contextDir)
+	require.NoError(t, err)
+
+	var s *session.Session
+	s, err = session.NewSession(filepath.Base(contextDir), sharedKey)
+	require.NoError(t, err)
+
+	syncDone := make(chan error)
+	progressOutput := streamformatter.NewProgressOutput(new(bytes.Buffer))
+	err = addDirToSession(s, contextDir, progressOutput, syncDone)
+	// Needs some assertions here to ensure we reset uid/gid to 0 for example
+	require.NoError(t, err)
+}
diff --git a/vendor.conf b/vendor.conf
index c5784f35fd73..fef9fbd261c0 100755
@@ -26,7 +26,8 @@ github.com/mattn/go-shellwords v1.0.3
 github.com/Microsoft/go-winio v0.4.4
 github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f
 github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715
-github.com/moby/buildkit da2b9dc7dab99e824b2b1067ad7d0523e32dd2d9 https://github.com/dmcgowan/buildkit.git
+github.com/moby/buildkit c2dbdeb457ea665699a5d97f79eebfac4ab4726f https://github.com/tonistiigi/buildkit.git
+github.com/tonistiigi/fsutil 1dedf6e90084bd88c4c518a15e68a37ed1370203
 github.com/Nvveen/Gotty
a8b993ba6abdb0e0c12b0125c603323a71c7790c https://github.com/ijc25/Gotty github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 github.com/opencontainers/image-spec v1.0.0 @@ -40,7 +41,6 @@ github.com/spf13/cobra v1.5.1 https://github.com/dnephin/cobra.git github.com/spf13/pflag 9ff6c6923cfffbcd502984b8e0c80539a94968b7 github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 -github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go deleted file mode 100644 index 7604418767d1..000000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go +++ /dev/null @@ -1,70 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" -) - -// NewArchiver returns a new Archiver which uses chrootarchive.Untar -func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { - if idMappings == nil { - idMappings = &idtools.IDMappings{} - } - return &archive.Archiver{Untar: Untar, IDMappings: idMappings} -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. 
-func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - if options == nil { - options = &archive.TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMappings.RootPair() - - dest = filepath.Clean(dest) - if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { - return err - } - } - - r := ioutil.NopCloser(tarArchive) - if decompress { - decompressedArchive, err := archive.DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return invokeUnpack(r, dest, options) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go deleted file mode 100644 index f2325abd74e4..000000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" -) - -// untar is the entry-point for docker-untar on re-exec. This is not used on -// Windows as it does not support chroot, hence no point sandboxing through -// chroot and rexec. -func untar() { - runtime.LockOSThread() - flag.Parse() - - var options *archive.TarOptions - - //read the options from the pipe "ExtraFiles" - if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { - fatal(err) - } - - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - if err := archive.Unpack(os.Stdin, "/", options); err != nil { - fatal(err) - } - // fully consume stdin in case it is zero padded - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { - - // We can't pass a potentially large exclude list directly via cmd line - // because we easily overrun the kernel's max argument/environment size - // when the full image list is passed (e.g. when this is used by - // `docker load`). 
We will marshall the options via a pipe to the - // child - r, w, err := os.Pipe() - if err != nil { - return fmt.Errorf("Untar pipe failure: %v", err) - } - - cmd := reexec.Command("docker-untar", dest) - cmd.Stdin = decompressedArchive - - cmd.ExtraFiles = append(cmd.ExtraFiles, r) - output := bytes.NewBuffer(nil) - cmd.Stdout = output - cmd.Stderr = output - - if err := cmd.Start(); err != nil { - return fmt.Errorf("Untar error on re-exec cmd: %v", err) - } - //write the options to the pipe for the untar exec to read - if err := json.NewEncoder(w).Encode(options); err != nil { - return fmt.Errorf("Untar json encode to pipe failed: %v", err) - } - w.Close() - - if err := cmd.Wait(); err != nil { - // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, - // we need to exhaust `xz`'s output, otherwise the `xz` side will be - // pending on write pipe forever - io.Copy(ioutil.Discard, decompressedArchive) - - return fmt.Errorf("Error processing tar file(%v): %s", err, output) - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go deleted file mode 100644 index 0a500ed5c2dd..000000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -package chrootarchive - -import ( - "io" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/longpath" -) - -// chroot is not supported by Windows -func chroot(path string) error { - return nil -} - -func invokeUnpack(decompressedArchive io.ReadCloser, - dest string, - options *archive.TarOptions) error { - // Windows is different to Linux here because Windows does not support - // chroot. Hence there is no point sandboxing a chrooted process to - // do the unpack. We call inline instead within the daemon process. - return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go deleted file mode 100644 index ebc3b84466d1..000000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go +++ /dev/null @@ -1,108 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/mount" - rsystem "github.com/opencontainers/runc/libcontainer/system" - "golang.org/x/sys/unix" -) - -// chroot on linux uses pivot_root instead of chroot -// pivot_root takes a new root and an old root. -// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. -// New root is where the new rootfs is set to. -// Old root is removed after the call to pivot_root so it is no longer available under the new root. 
-// This is similar to how libcontainer sets up a container's rootfs -func chroot(path string) (err error) { - // if the engine is running in a user namespace we need to use actual chroot - if rsystem.RunningInUserNS() { - return realChroot(path) - } - if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { - return fmt.Errorf("Error creating mount namespace before pivot: %v", err) - } - - // make everything in new ns private - if err := mount.MakeRPrivate("/"); err != nil { - return err - } - - if mounted, _ := mount.Mounted(path); !mounted { - if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { - return realChroot(path) - } - } - - // setup oldRoot for pivot_root - pivotDir, err := ioutil.TempDir(path, ".pivot_root") - if err != nil { - return fmt.Errorf("Error setting up pivot dir: %v", err) - } - - var mounted bool - defer func() { - if mounted { - // make sure pivotDir is not mounted before we try to remove it - if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { - if err == nil { - err = errCleanup - } - return - } - } - - errCleanup := os.Remove(pivotDir) - // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful - // because we already cleaned it up on failed pivot_root - if errCleanup != nil && !os.IsNotExist(errCleanup) { - errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) - if err == nil { - err = errCleanup - } - } - }() - - if err := unix.PivotRoot(path, pivotDir); err != nil { - // If pivot fails, fall back to the normal chroot after cleaning up temp dir - if err := os.Remove(pivotDir); err != nil { - return fmt.Errorf("Error cleaning up after failed pivot: %v", err) - } - return realChroot(path) - } - mounted = true - - // This is the new path for where the old root (prior to the pivot) has been moved to - // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction - pivotDir = filepath.Join("/", filepath.Base(pivotDir)) - - if err := unix.Chdir("/"); err != nil { - return fmt.Errorf("Error changing to new root: %v", err) - } - - // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host - if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { - return fmt.Errorf("Error making old root private after pivot: %v", err) - } - - // Now unmount the old root so it's no longer visible from the new root - if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { - return fmt.Errorf("Error while unmounting old root after pivot: %v", err) - } - mounted = false - - return nil -} - -func realChroot(path string) error { - if err := unix.Chroot(path); err != nil { - return fmt.Errorf("Error after fallback to chroot: %v", err) - } - if err := unix.Chdir("/"); err != nil { - return fmt.Errorf("Error changing to new root after chroot: %v", err) - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go deleted file mode 100644 index f9b5dece8c97..000000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows,!linux - -package chrootarchive - -import "golang.org/x/sys/unix" - -func chroot(path string) error { - if err := unix.Chroot(path); err != nil { - return err - } - return unix.Chdir("/") -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go 
b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go deleted file mode 100644 index 49acad79ff28..000000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package chrootarchive - -import ( - "io" - - "github.com/docker/docker/pkg/archive" -) - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can only be -// uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { - return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go deleted file mode 100644 index 33098b33e829..000000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go +++ /dev/null @@ -1,130 +0,0 @@ -//+build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" -) - -type applyLayerResponse struct { - LayerSize int64 `json:"layerSize"` -} - -// applyLayer is the entry-point for docker-applylayer on re-exec. This is not -// used on Windows as it does not support chroot, hence no point sandboxing -// through chroot and rexec. -func applyLayer() { - - var ( - tmpDir string - err error - options *archive.TarOptions - ) - runtime.LockOSThread() - flag.Parse() - - inUserns := rsystem.RunningInUserNS() - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - defer system.Umask(oldmask) - if err != nil { - fatal(err) - } - - if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { - fatal(err) - } - - if inUserns { - options.InUserNS = true - } - - if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { - fatal(err) - } - - os.Setenv("TMPDIR", tmpDir) - size, err := archive.UnpackLayer("/", os.Stdin, options) - os.RemoveAll(tmpDir) - if err != nil { - fatal(err) - } - - encoder := json.NewEncoder(os.Stdout) - if err := encoder.Encode(applyLayerResponse{size}); err != nil { - fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) - } - - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. 
-func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - if options == nil { - options = &archive.TarOptions{} - if rsystem.RunningInUserNS() { - options.InUserNS = true - } - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - data, err := json.Marshal(options) - if err != nil { - return 0, fmt.Errorf("ApplyLayer json encode: %v", err) - } - - cmd := reexec.Command("docker-applyLayer", dest) - cmd.Stdin = layer - cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) - - outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) - cmd.Stdout, cmd.Stderr = outBuf, errBuf - - if err = cmd.Run(); err != nil { - return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) - } - - // Stdout should be a valid JSON struct representing an applyLayerResponse. - response := applyLayerResponse{} - decoder := json.NewDecoder(outBuf) - if err = decoder.Decode(&response); err != nil { - return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) - } - - return response.LayerSize, nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go deleted file mode 100644 index dc07eb680dd8..000000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/longpath" -) - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. -func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - - // Ensure it is a Windows-style volume path - dest = longpath.AddPrefix(dest) - - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - - tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") - if err != nil { - return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) - } - - s, err := archive.UnpackLayer(dest, layer, nil) - os.RemoveAll(tmpDir) - if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) - } - - return s, nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go deleted file mode 100644 index 4f637f17b8f5..000000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/docker/docker/pkg/reexec" -) - -func init() { - reexec.Register("docker-applyLayer", applyLayer) - reexec.Register("docker-untar", untar) -} - -func fatal(err error) { - fmt.Fprint(os.Stderr, err) - os.Exit(1) -} - -// flush consumes all the bytes from the reader discarding -// any errors -func flush(r io.Reader) (bytes int64, err error) { - return io.Copy(ioutil.Discard, r) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go deleted file mode 100644 index fa17c9bf8316..000000000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package chrootarchive - -func init() { -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/README.md b/vendor/github.com/docker/docker/pkg/reexec/README.md deleted file mode 100644 index 6658f69b69d7..000000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# reexec - -The `reexec` package facilitates the busybox style reexec of the docker binary that we require because -of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of -the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go deleted file mode 100644 index 05319eacc93d..000000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build linux - -package reexec - -import ( - "os/exec" - "syscall" - - "golang.org/x/sys/unix" -) - -// Self returns the path to the current process's binary. -// Returns "/proc/self/exe". -func Self() string { - return "/proc/self/exe" -} - -// Command returns *exec.Cmd which has Path as current binary. Also it setting -// SysProcAttr.Pdeathsig to SIGTERM. -// This will use the in-memory version (/proc/self/exe) of the current binary, -// it is thus safe to delete or replace the on-disk binary (os.Args[0]). -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - SysProcAttr: &syscall.SysProcAttr{ - Pdeathsig: unix.SIGTERM, - }, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go deleted file mode 100644 index 778a720e3b91..000000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build freebsd solaris darwin - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. 
-// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will -// be set to "/usr/bin/docker". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go deleted file mode 100644 index 76edd824273e..000000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris,!darwin - -package reexec - -import ( - "os/exec" -) - -// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. -func Command(args ...string) *exec.Cmd { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go deleted file mode 100644 index ca871c4227ed..000000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build windows - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go deleted file mode 100644 index c56671d91927..000000000000 --- a/vendor/github.com/docker/docker/pkg/reexec/reexec.go +++ /dev/null @@ -1,47 +0,0 @@ -package reexec - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" -) - -var registeredInitializers = make(map[string]func()) - -// Register adds an initialization func under the specified name -func Register(name string, initializer func()) { - if _, exists := registeredInitializers[name]; exists { - panic(fmt.Sprintf("reexec func already registered under name %q", name)) - } - - registeredInitializers[name] = initializer -} - -// Init is called as the first part of the exec process and returns true if an -// initialization function was called. -func Init() bool { - initializer, exists := registeredInitializers[os.Args[0]] - if exists { - initializer() - - return true - } - return false -} - -func naiveSelf() string { - name := os.Args[0] - if filepath.Base(name) == name { - if lp, err := exec.LookPath(name); err == nil { - return lp - } - } - // handle conversion of relative paths to absolute - if absName, err := filepath.Abs(name); err == nil { - return absName - } - // if we couldn't get absolute name, return original - // (NOTE: Go only errors on Abs() if os.Getwd fails) - return name -} diff --git a/vendor/github.com/moby/buildkit/README.md b/vendor/github.com/moby/buildkit/README.md index ddcbb01cebb7..4101bf43b589 100644 --- a/vendor/github.com/moby/buildkit/README.md +++ b/vendor/github.com/moby/buildkit/README.md @@ -1,10 +1,16 @@ -### Important: This repository is in an early development phase and not suitable for practical workloads. It does not compare with `docker build` features yet. 
+### Important: This repository is in an early development phase
 
 [![asciicinema example](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU.png)](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU)
 
 ## BuildKit
 
+[![GoDoc](https://godoc.org/github.com/moby/buildkit?status.svg)](https://godoc.org/github.com/moby/buildkit/client/llb)
+[![Build Status](https://travis-ci.org/moby/buildkit.svg?branch=master)](https://travis-ci.org/moby/buildkit)
+[![Go Report Card](https://goreportcard.com/badge/github.com/moby/buildkit)](https://goreportcard.com/report/github.com/moby/buildkit)
+
+
 BuildKit is a toolkit for converting source code to build artifacts in an efficient, expressive and repeatable manner.
 
 Key features:
@@ -23,7 +29,7 @@ Read the proposal from https://github.com/moby/moby/issues/32925
 
 #### Quick start
 
-BuildKit daemon can be built in two different versions: one that uses [containerd](https://github.com/containerd/containerd) for execution and distribution, and a standalone version that doesn't have other dependencies apart from [runc](https://github.com/opencontainers/runc). We are open for adding more backends. `buildd` is a CLI utility for running the gRPC API.
+BuildKit daemon can be built in two different versions: one that uses [containerd](https://github.com/containerd/containerd) for execution and distribution, and a standalone version that doesn't have other dependencies apart from [runc](https://github.com/opencontainers/runc). We are open for adding more backends. `buildd` is a CLI utility for serving the gRPC API.
 
 ```bash
 # buildd daemon (choose one)
@@ -36,17 +42,15 @@ go build -o buildctl ./cmd/buildctl
 
 You can also use `make binaries` that prepares all binaries into the `bin/` directory.
 
-The first thing to test could be to try building BuildKit with BuildKit. BuildKit provides a low-level solver format that could be used by multiple build definitions. Preparation work for making the Dockerfile parser reusable as a frontend is tracked in https://github.com/moby/moby/pull/33492. As no frontends have been integrated yet we currently have to use a client library to generate this low-level definition.
-
 `examples/buildkit*` directory contains scripts that define how to build different configurations of BuildKit and its dependencies using the `client` package. Running one of these script generates a protobuf definition of a build graph. Note that the script itself does not execute any steps of the build.
 
-You can use `buildctl debug dump-llb` to see what data is this definition.
+You can use `buildctl debug dump-llb` to see what data is in this definition. Add `--dot` to generate dot layout.
 
 ```bash
 go run examples/buildkit0/buildkit.go | buildctl debug dump-llb | jq .
 ```
 
-To start building use `buildctl build` command. The script accepts `--target` flag to choose between `containerd` and `standalone` configurations. In standalone mode BuildKit binaries are built together with `runc`. In containerd mode, the `containerd` binary is built as well from the upstream repo.
+To start building use `buildctl build` command. The example script accepts `--target` flag to choose between `containerd` and `standalone` configurations. In standalone mode BuildKit binaries are built together with `runc`. In containerd mode, the `containerd` binary is built as well from the upstream repo.
```bash go run examples/buildkit0/buildkit.go | buildctl build @@ -59,10 +63,52 @@ Different versions of the example scripts show different ways of describing the - `./examples/buildkit0` - uses only exec operations, defines a full stage per component. - `./examples/buildkit1` - cloning git repositories has been separated for extra concurrency. - `./examples/buildkit2` - uses git sources directly instead of running `git clone`, allowing better performance and much safer caching. +- `./examples/buildkit3` - allows using local source files for separate components eg. `./buildkit3 --runc=local | buildctl build --local runc-src=some/local/path` +- `./examples/dockerfile2llb` - can be used to convert a Dockerfile to LLB for debugging purposes +- `./examples/gobuild` - shows how to use nested invocation to generate LLB for Go package internal dependencies + + +#### Examples + +##### Starting the buildd daemon: + +``` +buildd-standalone --debug --root /var/lib/buildkit +``` + +##### Building a Dockerfile: + +``` +buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=. +``` + +`context` and `dockerfile` should point to local directories for build context and Dockerfile location. + + +##### Exporting resulting image to containerd + +Containerd version of buildd needs to be used + +``` +buildctl build ... --exporter=image --exporter-opt name=docker.io/username/image +ctr --namespace=buildkit images ls +``` + +##### Exporting build result back to client + +``` +buildctl build ... --exporter=local --exporter-opt output=path/to/output-dir +``` + +#### View build cache + +``` +buildctl du -v +``` #### Supported runc version -During development buildkit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/3707703a694187c7d08e2f333da6ddd58bcb729d/RUNC.md) for more information. +During development buildkit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/d1e11f17ec7b325f89608dd46c128300b8727d50/RUNC.md) for more information. 
#### Contributing diff --git a/vendor/github.com/moby/buildkit/session/context.go b/vendor/github.com/moby/buildkit/session/context.go new file mode 100644 index 000000000000..31a29f0868a3 --- /dev/null +++ b/vendor/github.com/moby/buildkit/session/context.go @@ -0,0 +1,22 @@ +package session + +import "context" + +type contextKeyT string + +var contextKey = contextKeyT("buildkit/session-id") + +func NewContext(ctx context.Context, id string) context.Context { + if id != "" { + return context.WithValue(ctx, contextKey, id) + } + return ctx +} + +func FromContext(ctx context.Context) string { + v := ctx.Value(contextKey) + if v == nil { + return "" + } + return v.(string) +} diff --git a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go index 58b29686cc5a..c5a3b5bd6e65 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go +++ b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go @@ -1,31 +1,55 @@ package filesync import ( + "os" "time" - "google.golang.org/grpc" - "github.com/sirupsen/logrus" "github.com/tonistiigi/fsutil" + "google.golang.org/grpc" ) -func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error { +func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error { return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{ ExcludePatterns: excludes, - IncludePaths: includes, // TODO: rename IncludePatterns + IncludePatterns: includes, + Map: _map, }, progress) } -func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater) error { +func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb) error { st := time.Now() defer func() { logrus.Debugf("diffcopy took: %v", time.Since(st)) }() var cf fsutil.ChangeFunc + var ch fsutil.ContentHasher if cu != nil { cu.MarkSupported(true) cf = cu.HandleChange + ch = cu.ContentHasher() } + return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{ + NotifyHashed: cf, + ContentHasher: ch, + ProgressCb: progress, + }) +} - return fsutil.Receive(ds.Context(), ds, dest, cf) +func syncTargetDiffCopy(ds grpc.Stream, dest string) error { + if err := os.MkdirAll(dest, 0700); err != nil { + return err + } + return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{ + Merge: true, + Filter: func() func(*fsutil.Stat) bool { + uid := os.Getuid() + gid := os.Getgid() + return func(st *fsutil.Stat) bool { + st.Uid = uint32(uid) + st.Gid = uint32(gid) + return true + } + }(), + }) } diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go index fe4d00a729d0..5642f07ac449 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.go +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.go @@ -1,6 +1,7 @@ package filesync import ( + "fmt" "os" "strings" @@ -15,20 +16,29 @@ import ( const ( keyOverrideExcludes = "override-excludes" keyIncludePatterns = "include-patterns" + keyDirName = "dir-name" ) type fsSyncProvider struct { - root string - excludes []string - p progressCb - doneCh chan error + dirs map[string]SyncedDir + p progressCb + doneCh chan error +} + +type SyncedDir struct { + Name string + Dir string + Excludes []string + Map func(*fsutil.Stat) bool } // NewFSSyncProvider creates a new provider for sending files from client -func NewFSSyncProvider(root string, excludes []string) 
session.Attachable { +func NewFSSyncProvider(dirs []SyncedDir) session.Attachable { p := &fsSyncProvider{ - root: root, - excludes: excludes, + dirs: map[string]SyncedDir{}, + } + for _, d := range dirs { + p.dirs[d.Name] = d } return p } @@ -58,9 +68,19 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error opts, _ := metadata.FromContext(stream.Context()) // if no metadata continue with empty object + name, ok := opts[keyDirName] + if !ok || len(name) != 1 { + return errors.New("no dir name in request") + } + + dir, ok := sp.dirs[name[0]] + if !ok { + return errors.Errorf("no access allowed to dir %q", name[0]) + } + var excludes []string if len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true" { - excludes = sp.excludes + excludes = dir.Excludes } includes := opts[keyIncludePatterns] @@ -75,7 +95,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) error doneCh = sp.doneCh sp.doneCh = nil } - err := pr.sendFn(stream, sp.root, includes, excludes, progress) + err := pr.sendFn(stream, dir.Dir, includes, excludes, progress, dir.Map) if doneCh != nil { if err != nil { doneCh <- err @@ -94,8 +114,8 @@ type progressCb func(int, bool) type protocol struct { name string - sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb) error - recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater) error + sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error + recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error } func isProtoSupported(p string) bool { @@ -112,25 +132,23 @@ var supportedProtocols = []protocol{ sendFn: sendDiffCopy, recvFn: recvDiffCopy, }, - { - name: "tarstream", - sendFn: sendTarStream, - recvFn: recvTarStream, - }, } // FSSendRequestOpt defines options for FSSend request type FSSendRequestOpt struct { + Name string IncludePatterns []string OverrideExcludes bool DestDir string CacheUpdater CacheUpdater + ProgressCb func(int, bool) } // CacheUpdater is an object capable of sending notifications for the cache hash changes type CacheUpdater interface { MarkSupported(bool) HandleChange(fsutil.ChangeKind, string, os.FileInfo, error) error + ContentHasher() fsutil.ContentHasher } // FSSync initializes a transfer of files @@ -155,6 +173,8 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { opts[keyIncludePatterns] = opt.IncludePatterns } + opts[keyDirName] = []string{opt.Name} + ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -177,7 +197,45 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { return err } stream = cc + default: + panic(fmt.Sprintf("invalid protocol: %q", pr.name)) + } + + return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb) +} + +// NewFSSyncTarget allows writing into a directory +func NewFSSyncTarget(outdir string) session.Attachable { + p := &fsSyncTarget{ + outdir: outdir, + } + return p +} + +type fsSyncTarget struct { + outdir string +} + +func (sp *fsSyncTarget) Register(server *grpc.Server) { + RegisterFileSendServer(server, sp) +} + +func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error { + return syncTargetDiffCopy(stream, sp.outdir) +} + +func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progress func(int, bool)) error { + method := session.MethodURL(_FileSend_serviceDesc.ServiceName, 
"diffcopy") + if !c.Supports(method) { + return errors.Errorf("method %s not supported by the client", method) + } + + client := NewFileSendClient(c.Conn()) + + cc, err := client.DiffCopy(ctx) + if err != nil { + return err } - return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater) + return sendDiffCopy(cc, srcPath, nil, nil, progress, nil) } diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go index c6ed666383af..69c78886f2f3 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go @@ -277,6 +277,102 @@ var _FileSync_serviceDesc = grpc.ServiceDesc{ Metadata: "filesync.proto", } +// Client API for FileSend service + +type FileSendClient interface { + DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) +} + +type fileSendClient struct { + cc *grpc.ClientConn +} + +func NewFileSendClient(cc *grpc.ClientConn) FileSendClient { + return &fileSendClient{cc} +} + +func (c *fileSendClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) { + stream, err := grpc.NewClientStream(ctx, &_FileSend_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSend/DiffCopy", opts...) + if err != nil { + return nil, err + } + x := &fileSendDiffCopyClient{stream} + return x, nil +} + +type FileSend_DiffCopyClient interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ClientStream +} + +type fileSendDiffCopyClient struct { + grpc.ClientStream +} + +func (x *fileSendDiffCopyClient) Send(m *BytesMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *fileSendDiffCopyClient) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for FileSend service + +type FileSendServer interface { + DiffCopy(FileSend_DiffCopyServer) error +} + +func RegisterFileSendServer(s *grpc.Server, srv FileSendServer) { + s.RegisterService(&_FileSend_serviceDesc, srv) +} + +func _FileSend_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(FileSendServer).DiffCopy(&fileSendDiffCopyServer{stream}) +} + +type FileSend_DiffCopyServer interface { + Send(*BytesMessage) error + Recv() (*BytesMessage, error) + grpc.ServerStream +} + +type fileSendDiffCopyServer struct { + grpc.ServerStream +} + +func (x *fileSendDiffCopyServer) Send(m *BytesMessage) error { + return x.ServerStream.SendMsg(m) +} + +func (x *fileSendDiffCopyServer) Recv() (*BytesMessage, error) { + m := new(BytesMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _FileSend_serviceDesc = grpc.ServiceDesc{ + ServiceName: "moby.filesync.v1.FileSend", + HandlerType: (*FileSendServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "DiffCopy", + Handler: _FileSend_DiffCopy_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "filesync.proto", +} + func (m *BytesMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -558,7 +654,7 @@ var ( func init() { proto.RegisterFile("filesync.proto", fileDescriptorFilesync) } var fileDescriptorFilesync = []byte{ - // 198 bytes of a gzipped FileDescriptorProto + // 208 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 
0xe2, 0x4b, 0xcb, 0xcc, 0x49, 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6, @@ -566,10 +662,10 @@ var fileDescriptorFilesync = []byte{ 0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xab, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83, 0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85, 0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90, - 0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x74, 0x32, - 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, - 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, - 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51, - 0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0, 0x41, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x0c, - 0x8d, 0xc5, 0x34, 0x01, 0x00, 0x00, + 0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x34, 0x8a, + 0x82, 0x3a, 0x36, 0x35, 0x2f, 0x85, 0xda, 0x8e, 0x75, 0x32, 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, + 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, + 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, + 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51, 0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0, + 0xc1, 0x6f, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x72, 0x81, 0x1a, 0x91, 0x90, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.proto b/vendor/github.com/moby/buildkit/session/filesync/filesync.proto index 2fd5b3ec8d25..0ae293736809 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.proto +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.proto @@ -9,6 +9,11 @@ service FileSync{ rpc TarStream(stream BytesMessage) returns (stream BytesMessage); } +service FileSend{ + rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage); +} + + // BytesMessage contains a chunk of byte data message BytesMessage{ bytes data = 1; diff --git a/vendor/github.com/moby/buildkit/session/filesync/tarstream.go b/vendor/github.com/moby/buildkit/session/filesync/tarstream.go deleted file mode 100644 index 5cab867498fb..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/tarstream.go +++ /dev/null @@ -1,83 +0,0 @@ -package filesync - -import ( - "io" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "google.golang.org/grpc" -) - -func sendTarStream(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error { - a, err := archive.TarWithOptions(dir, &archive.TarOptions{ - ExcludePatterns: excludes, - }) - if err != nil { - return err - } - - size := 0 - buf := make([]byte, 1<<15) - t := new(BytesMessage) - for { - n, err := a.Read(buf) - if err != nil { - if err == io.EOF { - break - } - return err - } - t.Data = buf[:n] - - if err := stream.SendMsg(t); err != nil { - return err - } - size += n - if progress != nil { - progress(size, false) - } - } - if progress != nil { - progress(size, true) - } - return nil -} - -func recvTarStream(ds grpc.Stream, dest string, cs CacheUpdater) error { - - pr, pw := io.Pipe() - - go func() { - var ( - err 
error - t = new(BytesMessage) - ) - for { - if err = ds.RecvMsg(t); err != nil { - if err == io.EOF { - err = nil - } - break - } - _, err = pw.Write(t.Data) - if err != nil { - break - } - } - if err = pw.CloseWithError(err); err != nil { - logrus.Errorf("failed to close tar transfer pipe") - } - }() - - decompressedStream, err := archive.DecompressStream(pr) - if err != nil { - return errors.Wrap(err, "failed to decompress stream") - } - - if err := chrootarchive.Untar(decompressedStream, dest, nil); err != nil { - return errors.Wrap(err, "failed to untar context") - } - return nil -} diff --git a/vendor/github.com/moby/buildkit/session/manager.go b/vendor/github.com/moby/buildkit/session/manager.go index 9523e6f317bd..b3e5955652a8 100644 --- a/vendor/github.com/moby/buildkit/session/manager.go +++ b/vendor/github.com/moby/buildkit/session/manager.go @@ -49,14 +49,14 @@ func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, return errors.New("handler does not support hijack") } - uuid := r.Header.Get(headerSessionUUID) + id := r.Header.Get(headerSessionID) proto := r.Header.Get("Upgrade") sm.mu.Lock() - if _, ok := sm.sessions[uuid]; ok { + if _, ok := sm.sessions[id]; ok { sm.mu.Unlock() - return errors.Errorf("session %s already exists", uuid) + return errors.Errorf("session %s already exists", id) } if proto == "" { @@ -102,8 +102,10 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin ctx, cancel := context.WithCancel(ctx) defer cancel() + opts = canonicalHeaders(opts) + h := http.Header(opts) - uuid := h.Get(headerSessionUUID) + id := h.Get(headerSessionID) name := h.Get(headerSessionName) sharedKey := h.Get(headerSessionSharedKey) @@ -115,7 +117,7 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin c := &client{ Session: Session{ - uuid: uuid, + id: id, name: name, sharedKey: sharedKey, ctx: ctx, @@ -129,13 +131,13 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin for _, m := range opts[headerSessionMethod] { c.supported[strings.ToLower(m)] = struct{}{} } - sm.sessions[uuid] = c + sm.sessions[id] = c sm.updateCondition.Broadcast() sm.mu.Unlock() defer func() { sm.mu.Lock() - delete(sm.sessions, uuid) + delete(sm.sessions, id) sm.mu.Unlock() }() @@ -146,8 +148,8 @@ func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[strin return nil } -// Get returns a session by UUID -func (sm *Manager) Get(ctx context.Context, uuid string) (Caller, error) { +// Get returns a session by ID +func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) { ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -165,11 +167,11 @@ func (sm *Manager) Get(ctx context.Context, uuid string) (Caller, error) { select { case <-ctx.Done(): sm.mu.Unlock() - return nil, errors.Wrapf(ctx.Err(), "no active session for %s", uuid) + return nil, errors.Wrapf(ctx.Err(), "no active session for %s", id) default: } var ok bool - c, ok = sm.sessions[uuid] + c, ok = sm.sessions[id] if !ok || c.closed() { sm.updateCondition.Wait() continue @@ -200,3 +202,11 @@ func (c *client) Supports(url string) bool { func (c *client) Conn() *grpc.ClientConn { return c.cc } + +func canonicalHeaders(in map[string][]string) map[string][]string { + out := map[string][]string{} + for k := range in { + out[http.CanonicalHeaderKey(k)] = in[k] + } + return out +} diff --git a/vendor/github.com/moby/buildkit/session/session.go b/vendor/github.com/moby/buildkit/session/session.go index 
147486a75bfa..454c3d7f3f56 100644 --- a/vendor/github.com/moby/buildkit/session/session.go +++ b/vendor/github.com/moby/buildkit/session/session.go @@ -12,7 +12,7 @@ import ( ) const ( - headerSessionUUID = "X-Docker-Expose-Session-Uuid" + headerSessionID = "X-Docker-Expose-Session-Uuid" headerSessionName = "X-Docker-Expose-Session-Name" headerSessionSharedKey = "X-Docker-Expose-Session-Sharedkey" headerSessionMethod = "X-Docker-Expose-Session-Grpc-Method" @@ -28,7 +28,7 @@ type Attachable interface { // Session is a long running connection between client and a daemon type Session struct { - uuid string + id string name string sharedKey string ctx context.Context @@ -39,9 +39,9 @@ type Session struct { // NewSession returns a new long running session func NewSession(name, sharedKey string) (*Session, error) { - uuid := stringid.GenerateRandomID() + id := stringid.GenerateRandomID() s := &Session{ - uuid: uuid, + id: id, name: name, sharedKey: sharedKey, grpcServer: grpc.NewServer(), @@ -57,9 +57,9 @@ func (s *Session) Allow(a Attachable) { a.Register(s.grpcServer) } -// UUID returns unique identifier for the session -func (s *Session) UUID() string { - return s.uuid +// ID returns unique identifier for the session +func (s *Session) ID() string { + return s.id } // Run activates the session @@ -72,7 +72,7 @@ func (s *Session) Run(ctx context.Context, dialer Dialer) error { defer close(s.done) meta := make(map[string][]string) - meta[headerSessionUUID] = []string{s.uuid} + meta[headerSessionID] = []string{s.id} meta[headerSessionName] = []string{s.name} meta[headerSessionSharedKey] = []string{s.sharedKey} @@ -92,6 +92,7 @@ func (s *Session) Run(ctx context.Context, dialer Dialer) error { // Close closes the session func (s *Session) Close() error { if s.cancelCtx != nil && s.done != nil { + s.grpcServer.Stop() s.cancelCtx() <-s.done } diff --git a/vendor/github.com/moby/buildkit/vendor.conf b/vendor/github.com/moby/buildkit/vendor.conf index b13cfa96750b..f3760bd0f78e 100644 --- a/vendor/github.com/moby/buildkit/vendor.conf +++ b/vendor/github.com/moby/buildkit/vendor.conf @@ -6,26 +6,26 @@ github.com/davecgh/go-spew v1.1.0 github.com/pmezard/go-difflib v1.0.0 golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f -github.com/containerd/containerd 3707703a694187c7d08e2f333da6ddd58bcb729d -golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c -github.com/Sirupsen/logrus v0.11.0 +github.com/containerd/containerd d1e11f17ec7b325f89608dd46c128300b8727d50 +golang.org/x/sync f52d1811a62927559de87708c8913c1650ce4f26 +github.com/sirupsen/logrus v1.0.0 google.golang.org/grpc v1.3.0 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 golang.org/x/net 1f9224279e98554b6a6432d4dd998a739f8b2b7c github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8 github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55 github.com/containerd/continuity 86cec1535a968310e7532819f699ff2830ed7463 -github.com/opencontainers/image-spec v1.0.0-rc6 -github.com/opencontainers/runc 429a5387123625040bacfbb60d96b1cbd02293ab +github.com/opencontainers/image-spec v1.0.0 +github.com/opencontainers/runc e775f0fba3ea329b8b766451c892c41a3d49594d github.com/Microsoft/go-winio v0.4.1 github.com/containerd/fifo 69b99525e472735860a5269b75af1970142b3062 -github.com/opencontainers/runtime-spec 198f23f827eea397d4331d7eb048d9d4c7ff7bee +github.com/opencontainers/runtime-spec 96de01bbb42c7af89bff100e10a9f0fb62e75bfb github.com/containerd/go-runc 2774a2ea124a5c2d0aba13b5c2dd8a5a9a48775d 
github.com/containerd/console 7fed77e673ca4abcd0cbd6d4d0e0e22137cbd778 -github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e +github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 -github.com/docker/go-events aa2e3b613fbbfdddbe055a7b9e3ce271cfd83eca +github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 github.com/urfave/cli d70f47eeca3afd795160003bc6e28b001d60c67c github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 @@ -33,8 +33,14 @@ github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716 golang.org/x/time 8be79e1e0910c292df4e79c241bb7e8f7e725959 github.com/BurntSushi/locker 392720b78f44e9d0249fcac6c43b111b47a370b8 -github.com/docker/docker 05c7c311390911daebcf5d9519dee813fc02a887 +github.com/docker/docker 6f723db8c6f0c7f0b252674a9673a25b5978db04 https://github.com/tonistiigi/docker.git github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f -github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb +github.com/tonistiigi/fsutil 1dedf6e90084bd88c4c518a15e68a37ed1370203 github.com/stevvooe/continuity 86cec1535a968310e7532819f699ff2830ed7463 +github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf +github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git +github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 +github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b +github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d +github.com/docker/distribution 30578ca32960a4d368bf6db67b0a33c2a1f3dc6f diff --git a/vendor/github.com/tonistiigi/fsutil/diff.go b/vendor/github.com/tonistiigi/fsutil/diff.go index 1530973784d0..6125ef73af64 100644 --- a/vendor/github.com/tonistiigi/fsutil/diff.go +++ b/vendor/github.com/tonistiigi/fsutil/diff.go @@ -1,6 +1,7 @@ package fsutil import ( + "hash" "os" "golang.org/x/net/context" @@ -14,6 +15,8 @@ func Changes(ctx context.Context, a, b walkerFn, changeFn ChangeFunc) error { type HandleChangeFn func(ChangeKind, string, os.FileInfo, error) error +type ContentHasher func(*Stat) (hash.Hash, error) + func GetWalkerFn(root string) walkerFn { return func(ctx context.Context, pathC chan<- *currentPath) error { return Walk(ctx, root, nil, func(path string, f os.FileInfo, err error) error { @@ -35,3 +38,7 @@ func GetWalkerFn(root string) walkerFn { }) } } + +func emptyWalker(ctx context.Context, pathC chan<- *currentPath) error { + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter.go b/vendor/github.com/tonistiigi/fsutil/diskwriter.go index a54b4a737a84..a465615c3ba4 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter.go @@ -1,11 +1,6 @@ -// +build linux windows - package fsutil import ( - "archive/tar" - "crypto/sha256" - "encoding/hex" "hash" "io" "os" @@ -14,8 +9,7 @@ import ( "sync" "time" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/tarsum" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "golang.org/x/net/context" "golang.org/x/sync/errgroup" @@ -24,11 +18,15 @@ import ( type WriteToFunc func(context.Context, string, io.WriteCloser) error type DiskWriterOpt struct { - AsyncDataCb WriteToFunc - SyncDataCb WriteToFunc - NotifyCb func(ChangeKind, string, os.FileInfo, error) 
error + AsyncDataCb WriteToFunc + SyncDataCb WriteToFunc + NotifyCb func(ChangeKind, string, os.FileInfo, error) error + ContentHasher ContentHasher + Filter FilterFunc } +type FilterFunc func(*Stat) bool + type DiskWriter struct { opt DiskWriterOpt dest string @@ -37,6 +35,7 @@ type DiskWriter struct { ctx context.Context cancel func() eg *errgroup.Group + filter FilterFunc } func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWriter, error) { @@ -102,6 +101,12 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er return errors.Errorf("%s invalid change without stat information", p) } + if dw.filter != nil { + if ok := dw.filter(stat); !ok { + return nil + } + } + rename := true oldFi, err := os.Lstat(destPath) if err != nil { @@ -202,7 +207,7 @@ func (dw *DiskWriter) processChange(kind ChangeKind, p string, fi os.FileInfo, w var hw *hashedWriter if dw.opt.NotifyCb != nil { var err error - if hw, err = newHashWriter(p, fi, w); err != nil { + if hw, err = newHashWriter(dw.opt.ContentHasher, fi, w); err != nil { return err } w = hw @@ -229,13 +234,18 @@ func (dw *DiskWriter) processChange(kind ChangeKind, p string, fi os.FileInfo, w type hashedWriter struct { os.FileInfo io.Writer - h hash.Hash - w io.WriteCloser - sum string + h hash.Hash + w io.WriteCloser + dgst digest.Digest } -func newHashWriter(p string, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) { - h, err := NewTarsumHash(p, fi) +func newHashWriter(ch ContentHasher, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) { + stat, ok := fi.Sys().(*Stat) + if !ok { + return nil, errors.Errorf("invalid change without stat information") + } + + h, err := ch(stat) if err != nil { return nil, err } @@ -249,15 +259,15 @@ func newHashWriter(p string, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, e } func (hw *hashedWriter) Close() error { - hw.sum = string(hex.EncodeToString(hw.h.Sum(nil))) + hw.dgst = digest.NewDigest(digest.SHA256, hw.h) if hw.w != nil { return hw.w.Close() } return nil } -func (hw *hashedWriter) Hash() string { - return hw.sum +func (hw *hashedWriter) Digest() digest.Digest { + return hw.dgst } type lazyFileWriter struct { @@ -310,44 +320,3 @@ func nextSuffix() string { randmu.Unlock() return strconv.Itoa(int(1e9 + r%1e9))[1:] } - -func NewTarsumHash(p string, fi os.FileInfo) (hash.Hash, error) { - stat, ok := fi.Sys().(*Stat) - link := "" - if ok { - link = stat.Linkname - } - if fi.IsDir() { - p += string(os.PathSeparator) - } - h, err := archive.FileInfoHeader(p, fi, link) - if err != nil { - return nil, err - } - h.Name = p - if ok { - h.Uid = int(stat.Uid) - h.Gid = int(stat.Gid) - h.Linkname = stat.Linkname - if stat.Xattrs != nil { - h.Xattrs = make(map[string]string) - for k, v := range stat.Xattrs { - h.Xattrs[k] = string(v) - } - } - } - tsh := &tarsumHash{h: h, Hash: sha256.New()} - tsh.Reset() - return tsh, nil -} - -// Reset resets the Hash to its initial state. 
-func (tsh *tarsumHash) Reset() { - tsh.Hash.Reset() - tarsum.WriteV1Header(tsh.h, tsh.Hash) -} - -type tarsumHash struct { - hash.Hash - h *tar.Header -} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_darwin.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_darwin.go new file mode 100644 index 000000000000..94d3324acf7e --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_darwin.go @@ -0,0 +1,7 @@ +// +build darwin + +package fsutil + +func chtimes(path string, un int64) error { + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_linux.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_linux.go index c6d97eb0a6de..74f08a15caa7 100644 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_linux.go +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_linux.go @@ -3,36 +3,10 @@ package fsutil import ( - "os" - "syscall" - "github.com/pkg/errors" - "github.com/stevvooe/continuity/sysx" "golang.org/x/sys/unix" ) -func rewriteMetadata(p string, stat *Stat) error { - for key, value := range stat.Xattrs { - sysx.Setxattr(p, key, value, 0) - } - - if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil { - return errors.Wrapf(err, "failed to lchown %s", p) - } - - if os.FileMode(stat.Mode)&os.ModeSymlink == 0 { - if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil { - return errors.Wrapf(err, "failed to chown %s", p) - } - } - - if err := chtimes(p, stat.ModTime); err != nil { - return errors.Wrapf(err, "failed to chtimes %s", p) - } - - return nil -} - func chtimes(path string, un int64) error { var utimes [2]unix.Timespec utimes[0] = unix.NsecToTimespec(un) @@ -44,21 +18,3 @@ func chtimes(path string, un int64) error { return nil } - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(path string, stat *Stat) error { - mode := uint32(stat.Mode & 07777) - if os.FileMode(stat.Mode)&os.ModeCharDevice != 0 { - mode |= syscall.S_IFCHR - } else if os.FileMode(stat.Mode)&os.ModeNamedPipe != 0 { - mode |= syscall.S_IFIFO - } else { - mode |= syscall.S_IFBLK - } - - if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go new file mode 100644 index 000000000000..5f51fce3b4d4 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go @@ -0,0 +1,51 @@ +// +build !windows + +package fsutil + +import ( + "os" + "syscall" + + "github.com/pkg/errors" + "github.com/stevvooe/continuity/sysx" +) + +func rewriteMetadata(p string, stat *Stat) error { + for key, value := range stat.Xattrs { + sysx.Setxattr(p, key, value, 0) + } + + if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil { + return errors.Wrapf(err, "failed to lchown %s", p) + } + + if os.FileMode(stat.Mode)&os.ModeSymlink == 0 { + if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil { + return errors.Wrapf(err, "failed to chown %s", p) + } + } + + if err := chtimes(p, stat.ModTime); err != nil { + return errors.Wrapf(err, "failed to chtimes %s", p) + } + + return nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(path string, stat *Stat) error { + mode := uint32(stat.Mode & 07777) + if 
os.FileMode(stat.Mode)&os.ModeCharDevice != 0 { + mode |= syscall.S_IFCHR + } else if os.FileMode(stat.Mode)&os.ModeNamedPipe != 0 { + mode |= syscall.S_IFIFO + } else { + mode |= syscall.S_IFBLK + } + + if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/receive.go b/vendor/github.com/tonistiigi/fsutil/receive.go index e7cee2b7ce9a..233c28b70ee7 100644 --- a/vendor/github.com/tonistiigi/fsutil/receive.go +++ b/vendor/github.com/tonistiigi/fsutil/receive.go @@ -1,5 +1,3 @@ -// +build linux windows - package fsutil import ( @@ -12,29 +10,45 @@ import ( "golang.org/x/sync/errgroup" ) -func Receive(ctx context.Context, conn Stream, dest string, notifyHashed ChangeFunc) error { +type ReceiveOpt struct { + NotifyHashed ChangeFunc + ContentHasher ContentHasher + ProgressCb func(int, bool) + Merge bool + Filter FilterFunc +} + +func Receive(ctx context.Context, conn Stream, dest string, opt ReceiveOpt) error { ctx, cancel := context.WithCancel(context.Background()) defer cancel() r := &receiver{ - conn: &syncStream{Stream: conn}, - dest: dest, - files: make(map[string]uint32), - pipes: make(map[uint32]io.WriteCloser), - notifyHashed: notifyHashed, + conn: &syncStream{Stream: conn}, + dest: dest, + files: make(map[string]uint32), + pipes: make(map[uint32]io.WriteCloser), + notifyHashed: opt.NotifyHashed, + contentHasher: opt.ContentHasher, + progressCb: opt.ProgressCb, + merge: opt.Merge, + filter: opt.Filter, } return r.run(ctx) } type receiver struct { - dest string - conn Stream - files map[string]uint32 - pipes map[uint32]io.WriteCloser - mu sync.RWMutex - muPipes sync.RWMutex + dest string + conn Stream + files map[string]uint32 + pipes map[uint32]io.WriteCloser + mu sync.RWMutex + muPipes sync.RWMutex + progressCb func(int, bool) + merge bool + filter FilterFunc notifyHashed ChangeFunc + contentHasher ContentHasher orderValidator Validator hlValidator Hardlinks } @@ -81,8 +95,10 @@ func (r *receiver) run(ctx context.Context) error { g, ctx := errgroup.WithContext(ctx) dw, err := NewDiskWriter(ctx, r.dest, DiskWriterOpt{ - AsyncDataCb: r.asyncDataFunc, - NotifyCb: r.notifyHashed, + AsyncDataCb: r.asyncDataFunc, + NotifyCb: r.notifyHashed, + ContentHasher: r.contentHasher, + Filter: r.filter, }) if err != nil { return err @@ -91,7 +107,11 @@ func (r *receiver) run(ctx context.Context) error { w := newDynamicWalker() g.Go(func() error { - err := doubleWalkDiff(ctx, dw.HandleChange, GetWalkerFn(r.dest), w.fill) + destWalker := emptyWalker + if !r.merge { + destWalker = GetWalkerFn(r.dest) + } + err := doubleWalkDiff(ctx, dw.HandleChange, destWalker, w.fill) if err != nil { return err } @@ -105,12 +125,23 @@ func (r *receiver) run(ctx context.Context) error { g.Go(func() error { var i uint32 = 0 + size := 0 + if r.progressCb != nil { + defer func() { + r.progressCb(size, true) + }() + } var p Packet for { p = Packet{Data: p.Data[:0]} if err := r.conn.RecvMsg(&p); err != nil { return err } + if r.progressCb != nil { + size += p.Size() + r.progressCb(size, false) + } + switch p.Type { case PACKET_STAT: if p.Stat == nil { diff --git a/vendor/github.com/tonistiigi/fsutil/receive_unsupported.go b/vendor/github.com/tonistiigi/fsutil/receive_unsupported.go deleted file mode 100644 index 8e8334237499..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/receive_unsupported.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !linux,!windows - -package fsutil - -import ( - "runtime" - - 
"github.com/pkg/errors" - "golang.org/x/net/context" -) - -func Receive(ctx context.Context, conn Stream, dest string, notifyHashed ChangeFunc) error { - return errors.Errorf("receive is unsupported in %s", runtime.GOOS) -} diff --git a/vendor/github.com/tonistiigi/fsutil/validator.go b/vendor/github.com/tonistiigi/fsutil/validator.go index e4a5eba66b08..2bd1287a8535 100644 --- a/vendor/github.com/tonistiigi/fsutil/validator.go +++ b/vendor/github.com/tonistiigi/fsutil/validator.go @@ -2,7 +2,8 @@ package fsutil import ( "os" - "path/filepath" + "path" + "runtime" "sort" "strings" @@ -26,14 +27,17 @@ func (v *Validator) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err if v.parentDirs == nil { v.parentDirs = make([]parent, 1, 10) } - if p != filepath.Clean(p) { + if runtime.GOOS == "windows" { + p = strings.Replace(p, "\\", "", -1) + } + if p != path.Clean(p) { return errors.Errorf("invalid unclean path %s", p) } - if filepath.IsAbs(p) { + if path.IsAbs(p) { return errors.Errorf("abolute path %s not allowed", p) } - dir := filepath.Dir(p) - base := filepath.Base(p) + dir := path.Dir(p) + base := path.Base(p) if dir == "." { dir = "" } @@ -51,12 +55,12 @@ func (v *Validator) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err } if dir != v.parentDirs[len(v.parentDirs)-1].dir || v.parentDirs[i].last >= base { - return errors.Errorf("changes out of order: %q %q", p, filepath.Join(v.parentDirs[i].dir, v.parentDirs[i].last)) + return errors.Errorf("changes out of order: %q %q", p, path.Join(v.parentDirs[i].dir, v.parentDirs[i].last)) } v.parentDirs[i].last = base if kind != ChangeKindDelete && fi.IsDir() { v.parentDirs = append(v.parentDirs, parent{ - dir: filepath.Join(dir, base), + dir: path.Join(dir, base), last: "", }) } diff --git a/vendor/github.com/tonistiigi/fsutil/walker.go b/vendor/github.com/tonistiigi/fsutil/walker.go index bfec609b5a45..db1af56b49de 100644 --- a/vendor/github.com/tonistiigi/fsutil/walker.go +++ b/vendor/github.com/tonistiigi/fsutil/walker.go @@ -13,8 +13,9 @@ import ( ) type WalkOpt struct { - IncludePaths []string // todo: remove? + IncludePatterns []string ExcludePatterns []string + Map func(*Stat) bool } func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error { @@ -57,9 +58,9 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err } if opt != nil { - if opt.IncludePaths != nil { + if opt.IncludePatterns != nil { matched := false - for _, p := range opt.IncludePaths { + for _, p := range opt.IncludePatterns { if m, _ := filepath.Match(p, path); m { matched = true break @@ -138,7 +139,12 @@ func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) err case <-ctx.Done(): return ctx.Err() default: - if err := fn(path, &StatInfo{stat}, nil); err != nil { + if opt != nil && opt.Map != nil { + if allowed := opt.Map(stat); !allowed { + return nil + } + } + if err := fn(stat.Path, &StatInfo{stat}, nil); err != nil { return err } }